Statistics
| Branch: | Tag: | Revision:

root / lib / client / gnt_cluster.py @ b6267745

History | View | Annotate | Download (45.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21
"""Cluster related commands"""
22

    
23
# pylint: disable=W0401,W0613,W0614,C0103
24
# W0401: Wildcard import ganeti.cli
25
# W0613: Unused argument, since all functions follow the same API
26
# W0614: Unused import %s from wildcard import (since we need cli)
27
# C0103: Invalid name gnt-cluster
28

    
29
import os.path
30
import time
31
import OpenSSL
32
import itertools
33

    
34
from ganeti.cli import *
35
from ganeti import opcodes
36
from ganeti import constants
37
from ganeti import errors
38
from ganeti import utils
39
from ganeti import bootstrap
40
from ganeti import ssh
41
from ganeti import objects
42
from ganeti import uidpool
43
from ganeti import compat
44
from ganeti import netutils
45

    
46

    
47
# Option to recover from an emergency power off (EPO)
ON_OPT = cli_option("--on", default=False,
                    action="store_true", dest="on",
                    help="Recover from an EPO")

# Option to interpret positional arguments as node groups instead of nodes;
# continuation lines realigned with the opening parenthesis for consistency
# with ON_OPT above
GROUPS_OPT = cli_option("--groups", default=False,
                        action="store_true", dest="groups",
                        help="Arguments are node groups instead of nodes")

_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
_EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes
58

    
59

    
60
@UsesRPC
def InitCluster(opts, args):
  """Initialize the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the desired
      cluster name
  @rtype: int
  @return: the desired exit code

  """
  # An explicit volume group only makes sense when LVM storage is enabled
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  vg_name = opts.vg_name
  if opts.lvm_storage and not opts.vg_name:
    # LVM enabled but no explicit group given: use the built-in default
    vg_name = constants.DEFAULT_VG

  # Likewise, a usermode helper is only meaningful with DRBD storage
  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    return 1

  drbd_helper = opts.drbd_helper
  if opts.drbd_storage and not opts.drbd_helper:
    drbd_helper = constants.DEFAULT_DRBD_HELPER

  master_netdev = opts.master_netdev
  if master_netdev is None:
    master_netdev = constants.DEFAULT_BRIDGE

  # Hypervisors arrive as a comma-separated string on the command line
  hvlist = opts.enabled_hypervisors
  if hvlist is None:
    hvlist = constants.DEFAULT_ENABLED_HYPERVISOR
  hvlist = hvlist.split(",")

  hvparams = dict(opts.hvparams)
  beparams = opts.beparams
  nicparams = opts.nicparams

  # prepare beparams dict: overlay user values on the defaults, then
  # type-check the merged result
  beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  # prepare nicparams dict
  nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  # prepare ndparams dict
  if opts.ndparams is None:
    ndparams = dict(constants.NDC_DEFAULTS)
  else:
    ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams)
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  # prepare hvparams dict: every known hypervisor gets a parameter dict,
  # completed from the built-in defaults
  for hv in constants.HYPER_TYPES:
    if hv not in hvparams:
      hvparams[hv] = {}
    hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
    utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)

  if opts.candidate_pool_size is None:
    opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT

  if opts.mac_prefix is None:
    opts.mac_prefix = constants.DEFAULT_MAC_PREFIX

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    # parse the command-line string into the internal range representation
    uid_pool = uidpool.ParseUidPool(uid_pool)

  if opts.prealloc_wipe_disks is None:
    opts.prealloc_wipe_disks = False

  # the option may be missing (None) or a non-numeric string
  try:
    primary_ip_version = int(opts.primary_ip_version)
  except (ValueError, TypeError), err:
    ToStderr("Invalid primary ip version value: %s" % str(err))
    return 1

  bootstrap.InitCluster(cluster_name=args[0],
                        secondary_ip=opts.secondary_ip,
                        vg_name=vg_name,
                        mac_prefix=opts.mac_prefix,
                        master_netdev=master_netdev,
                        file_storage_dir=opts.file_storage_dir,
                        shared_file_storage_dir=opts.shared_file_storage_dir,
                        enabled_hypervisors=hvlist,
                        hvparams=hvparams,
                        beparams=beparams,
                        nicparams=nicparams,
                        ndparams=ndparams,
                        candidate_pool_size=opts.candidate_pool_size,
                        modify_etc_hosts=opts.modify_etc_hosts,
                        modify_ssh_setup=opts.modify_ssh_setup,
                        maintain_node_health=opts.maintain_node_health,
                        drbd_helper=drbd_helper,
                        uid_pool=uid_pool,
                        default_iallocator=opts.default_iallocator,
                        primary_ip_version=primary_ip_version,
                        prealloc_wipe_disks=opts.prealloc_wipe_disks,
                        )
  # run the post-initialization opcode on the freshly created cluster
  op = opcodes.OpClusterPostInit()
  SubmitOpCode(op, opts=opts)
  return 0
167

    
168

    
169
@UsesRPC
def DestroyCluster(opts, args):
  """Destroy the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # Destruction cannot be undone, so require the explicit --yes-do-it
  # option instead of an interactive confirmation.  The message previously
  # read "really want destroy" (missing "to"); fixed.
  if not opts.yes_do_it:
    ToStderr("Destroying a cluster is irreversible. If you really want"
             " to destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpClusterDestroy()
  master = SubmitOpCode(op, opts=opts)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master)
  return 0
191

    
192

    
193
def RenameCluster(opts, args):
  """Rename the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new cluster name
  @rtype: int
  @return: the desired exit code

  """
  client = GetClient()

  (old_name, ) = client.QueryConfigValues(["cluster_name"])
  target_name = args[0]

  # Renaming moves the cluster IP; warn the operator unless forced
  confirmation = ("This will rename the cluster from '%s' to '%s'. If you are"
                  " connected over the network to the cluster name, the"
                  " operation is very dangerous as the IP address will be"
                  " removed from the node and the change may not go through."
                  " Continue?") % (old_name, target_name)
  if not opts.force and not AskUser(confirmation):
    return 1

  rename_result = SubmitOpCode(opcodes.OpClusterRename(name=target_name),
                               opts=opts, cl=client)

  if rename_result:
    ToStdout("Cluster renamed from '%s' to '%s'", old_name, rename_result)

  return 0
224

    
225

    
226
def RedistributeConfig(opts, args):
  """Forces push of the cluster configuration.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: empty list
  @rtype: int
  @return: the desired exit code

  """
  # Submit directly or queue for later, depending on the submit options
  SubmitOrSend(opcodes.OpClusterRedistConf(), opts)
  return 0
239

    
240

    
241
def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # All version fields come from a single cluster-info query
  cluster_info = GetClient().QueryClusterInfo()
  ToStdout("Software version: %s", cluster_info["software_version"])
  ToStdout("Internode protocol: %s", cluster_info["protocol_version"])
  ToStdout("Configuration format: %s", cluster_info["config_version"])
  ToStdout("OS api version: %s", cluster_info["os_api_version"])
  ToStdout("Export interface: %s", cluster_info["export_version"])
  return 0
259

    
260

    
261
def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # The master name is read from the local configuration, not via RPC
  ToStdout(bootstrap.GetMaster())
  return 0
274

    
275

    
276
def _PrintGroupedParams(paramsdict, level=1, roman=False):
  """Print Grouped parameters (be, nic, disk) by group.

  @type paramsdict: dict of dicts
  @param paramsdict: {group: {param: value, ...}, ...}
  @type level: int
  @param level: Level of indention
  @type roman: bool
  @param roman: whether integer values should be converted to roman numerals

  """
  prefix = "  " * level
  for key in sorted(paramsdict):
    value = paramsdict[key]
    if isinstance(value, dict):
      # nested group: print a header and recurse one level deeper
      ToStdout("%s- %s:", prefix, key)
      _PrintGroupedParams(value, level=level + 1, roman=roman)
    elif roman and isinstance(value, int):
      ToStdout("%s  %s: %s", prefix, key, compat.TryToRoman(value))
    else:
      ToStdout("%s  %s: %s", prefix, key, value)
294

    
295

    
296
def ShowClusterConfig(opts, args):
  """Shows cluster information.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  result = cl.QueryClusterInfo()

  # Identity
  ToStdout("Cluster name: %s", result["name"])
  ToStdout("Cluster UUID: %s", result["uuid"])

  ToStdout("Creation time: %s", utils.FormatTime(result["ctime"]))
  ToStdout("Modification time: %s", utils.FormatTime(result["mtime"]))

  ToStdout("Master node: %s", result["master"])

  ToStdout("Architecture (this node): %s (%s)",
           result["architecture"][0], result["architecture"][1])

  if result["tags"]:
    tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
  else:
    tags = "(none)"

  ToStdout("Tags: %s", tags)

  # Hypervisor configuration
  ToStdout("Default hypervisor: %s", result["default_hypervisor"])
  ToStdout("Enabled hypervisors: %s",
           utils.CommaJoin(result["enabled_hypervisors"]))

  ToStdout("Hypervisor parameters:")
  _PrintGroupedParams(result["hvparams"])

  ToStdout("OS-specific hypervisor parameters:")
  _PrintGroupedParams(result["os_hvp"])

  ToStdout("OS parameters:")
  _PrintGroupedParams(result["osparams"])

  ToStdout("Hidden OSes: %s", utils.CommaJoin(result["hidden_os"]))
  ToStdout("Blacklisted OSes: %s", utils.CommaJoin(result["blacklisted_os"]))

  # Cluster-wide scalar settings
  ToStdout("Cluster parameters:")
  ToStdout("  - candidate pool size: %s",
            compat.TryToRoman(result["candidate_pool_size"],
                              convert=opts.roman_integers))
  ToStdout("  - master netdev: %s", result["master_netdev"])
  ToStdout("  - lvm volume group: %s", result["volume_group_name"])
  if result["reserved_lvs"]:
    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
  else:
    reserved_lvs = "(none)"
  ToStdout("  - lvm reserved volumes: %s", reserved_lvs)
  ToStdout("  - drbd usermode helper: %s", result["drbd_usermode_helper"])
  ToStdout("  - file storage path: %s", result["file_storage_dir"])
  ToStdout("  - shared file storage path: %s",
           result["shared_file_storage_dir"])
  ToStdout("  - maintenance of node health: %s",
           result["maintain_node_health"])
  ToStdout("  - uid pool: %s",
            uidpool.FormatUidPool(result["uid_pool"],
                                  roman=opts.roman_integers))
  ToStdout("  - default instance allocator: %s", result["default_iallocator"])
  ToStdout("  - primary ip version: %d", result["primary_ip_version"])
  ToStdout("  - preallocation wipe disks: %s", result["prealloc_wipe_disks"])
  ToStdout("  - OS search path: %s", utils.CommaJoin(constants.OS_SEARCH_PATH))

  # Grouped parameter defaults
  ToStdout("Default node parameters:")
  _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)

  ToStdout("Default instance parameters:")
  _PrintGroupedParams(result["beparams"], roman=opts.roman_integers)

  ToStdout("Default nic parameters:")
  _PrintGroupedParams(result["nicparams"], roman=opts.roman_integers)

  return 0
378

    
379

    
380
def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the path of
      the file to be copied
  @rtype: int
  @return: the desired exit code

  """
  filename = args[0]
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename,
                               errors.ECODE_INVAL)

  client = GetClient()
  (cluster_name, ) = client.QueryConfigValues(["cluster_name"])

  # The master itself already has the file, so filter it out
  target_nodes = GetOnlineNodes(nodes=opts.nodes, cl=client,
                                filter_master=True,
                                secondary_ips=opts.use_replication_network,
                                nodegroup=opts.nodegroup)

  runner = ssh.SshRunner(cluster_name=cluster_name)
  for node_name in target_nodes:
    # Report failures but keep copying to the remaining nodes
    if not runner.CopyFileToNode(node_name, filename):
      ToStderr("Copy of file %s to node %s failed", filename, node_name)

  return 0
410

    
411

    
412
def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the command to be run and its arguments
  @rtype: int
  @return: the desired exit code

  """
  client = GetClient()
  command = " ".join(args)

  nodes = GetOnlineNodes(nodes=opts.nodes, cl=client, nodegroup=opts.nodegroup)
  (cluster_name, master_node) = client.QueryConfigValues(["cluster_name",
                                                          "master_node"])

  runner = ssh.SshRunner(cluster_name=cluster_name)

  # Run on the master last, so a command that breaks connectivity does not
  # cut us off before the other nodes have been handled
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  for node_name in nodes:
    cmd_result = runner.Run(node_name, "root", command)
    ToStdout("------------------------------------------------")
    ToStdout("node: %s", node_name)
    ToStdout("%s", cmd_result.output)
    ToStdout("return code = %s", cmd_result.exit_code)

  return 0
446

    
447

    
448
def VerifyCluster(opts, args):
449
  """Verify integrity of cluster, performing various test on nodes.
450

451
  @param opts: the command line options selected by the user
452
  @type args: list
453
  @param args: should be an empty list
454
  @rtype: int
455
  @return: the desired exit code
456

457
  """
458
  skip_checks = []
459

    
460
  if opts.skip_nplusone_mem:
461
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)
462

    
463
  cl = GetClient()
464

    
465
  op = opcodes.OpClusterVerify(verbose=opts.verbose,
466
                               error_codes=opts.error_codes,
467
                               debug_simulate_errors=opts.simulate_errors,
468
                               skip_checks=skip_checks,
469
                               group_name=opts.nodegroup)
470
  result = SubmitOpCode(op, cl=cl, opts=opts)
471

    
472
  # Keep track of submitted jobs
473
  jex = JobExecutor(cl=cl, opts=opts)
474

    
475
  for (status, job_id) in result[constants.JOB_IDS_KEY]:
476
    jex.AddJobId(None, status, job_id)
477

    
478
  results = jex.GetResults()
479

    
480
  (bad_jobs, bad_results) = \
481
    map(len,
482
        # Convert iterators to lists
483
        map(list,
484
            # Count errors
485
            map(compat.partial(itertools.ifilterfalse, bool),
486
                # Convert result to booleans in a tuple
487
                zip(*((job_success, len(op_results) == 1 and op_results[0])
488
                      for (job_success, op_results) in results)))))
489

    
490
  if bad_jobs == 0 and bad_results == 0:
491
    rcode = constants.EXIT_SUCCESS
492
  else:
493
    rcode = constants.EXIT_FAILURE
494
    if bad_jobs > 0:
495
      ToStdout("%s job(s) failed while verifying the cluster.", bad_jobs)
496

    
497
  return rcode
498

    
499

    
500
def VerifyDisks(opts, args):
  """Verify integrity of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  op = opcodes.OpClusterVerifyDisks()

  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs (the opcode returns one job id per group)
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  retcode = constants.EXIT_SUCCESS

  # NOTE: "result" and "op" are deliberately re-bound inside this loop,
  # shadowing the values used above
  for (status, result) in jex.GetResults():
    if not status:
      ToStdout("Job failed: %s", result)
      continue

    # each job yields exactly one opcode result: a (bad_nodes, instances,
    # missing) triple
    ((bad_nodes, instances, missing), ) = result

    for node, text in bad_nodes.items():
      # show only the tail of the error text to keep output readable
      ToStdout("Error gathering data on node %s: %s",
               node, utils.SafeEncode(text[-400:]))
      retcode = constants.EXIT_FAILURE
      ToStdout("You need to fix these nodes first before fixing instances")

    # re-activate disks for instances that are not missing volumes
    for iname in instances:
      if iname in missing:
        continue
      op = opcodes.OpInstanceActivateDisks(instance_name=iname)
      try:
        ToStdout("Activating disks for instance '%s'", iname)
        SubmitOpCode(op, opts=opts, cl=cl)
      except errors.GenericError, err:
        nret, msg = FormatError(err)
        # accumulate error codes; any failure makes the exit code non-zero
        retcode |= nret
        ToStderr("Error activating disks for instance %s: %s", iname, msg)

    if missing:
      # "missing" maps instance name -> list of (node, volume) pairs
      for iname, ival in missing.iteritems():
        all_missing = compat.all(x[0] in bad_nodes for x in ival)
        if all_missing:
          # no usable data: every volume lives on an unreachable node
          ToStdout("Instance %s cannot be verified as it lives on"
                   " broken nodes", iname)
        else:
          ToStdout("Instance %s has missing logical volumes:", iname)
          ival.sort()
          for node, vol in ival:
            if node in bad_nodes:
              ToStdout("\tbroken node %s /dev/%s", node, vol)
            else:
              ToStdout("\t%s /dev/%s", node, vol)

      ToStdout("You need to replace or recreate disks for all the above"
               " instances if this message persists after fixing broken nodes.")
      retcode = constants.EXIT_FAILURE

  return retcode
569

    
570

    
571
def RepairDiskSizes(opts, args):
  """Verify sizes of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: optional list of instances to restrict check to
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRepairDiskSizes(instances=args)
  SubmitOpCode(op, opts=opts)
  # Return an explicit exit code as documented; the function previously
  # fell off the end and returned None
  return 0
583

    
584

    
585
@UsesRPC
586
def MasterFailover(opts, args):
587
  """Failover the master node.
588

589
  This command, when run on a non-master node, will cause the current
590
  master to cease being master, and the non-master to become new
591
  master.
592

593
  @param opts: the command line options selected by the user
594
  @type args: list
595
  @param args: should be an empty list
596
  @rtype: int
597
  @return: the desired exit code
598

599
  """
600
  if opts.no_voting:
601
    usertext = ("This will perform the failover even if most other nodes"
602
                " are down, or if this node is outdated. This is dangerous"
603
                " as it can lead to a non-consistent cluster. Check the"
604
                " gnt-cluster(8) man page before proceeding. Continue?")
605
    if not AskUser(usertext):
606
      return 1
607

    
608
  return bootstrap.MasterFailover(no_voting=opts.no_voting)
609

    
610

    
611
def MasterPing(opts, args):
  """Checks if the master is alive.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  try:
    GetClient().QueryClusterInfo()
  except Exception: # pylint: disable=W0703
    # any failure whatsoever means the master is not responding
    return 1
  return 0
627

    
628

    
629
def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the tag pattern
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpTagsSearch(pattern=args[0])
  result = SubmitOpCode(op, opts=opts)
  if not result:
    return 1
  # sorted() replaces the list()/sort() pair; output order is unchanged
  for path, tag in sorted(result):
    ToStdout("%s %s", path, tag)
  # Return an explicit success code; the function previously fell off the
  # end and returned None despite documenting an integer exit code
  return 0
647

    
648

    
649
def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
  """Reads and verifies an X509 certificate.

  @type cert_filename: string
  @param cert_filename: the path of the file containing the certificate to
                        verify encoded in PEM format
  @type verify_private_key: bool
  @param verify_private_key: whether to verify the private key in addition to
                             the public certificate
  @rtype: string
  @return: a string containing the PEM-encoded certificate.
  @raise errors.X509CertError: if the file cannot be read, or the PEM data
      does not contain a loadable certificate (or private key, if requested)

  """
  try:
    pem = utils.ReadFile(cert_filename)
  except IOError, err:
    raise errors.X509CertError(cert_filename,
                               "Unable to read certificate: %s" % str(err))

  # broad "except" on purpose: pyOpenSSL can raise several exception types
  # on malformed input, all of which must be reported uniformly
  try:
    OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
  except Exception, err:
    raise errors.X509CertError(cert_filename,
                               "Unable to load certificate: %s" % str(err))

  if verify_private_key:
    # the same PEM file is expected to also contain the private key
    try:
      OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem)
    except Exception, err:
      raise errors.X509CertError(cert_filename,
                                 "Unable to load private key: %s" % str(err))

  return pem
682

    
683

    
684
def _RenewCrypto(new_cluster_cert, new_rapi_cert, #pylint: disable=R0911
                 rapi_cert_filename, new_spice_cert, spice_cert_filename,
                 spice_cacert_filename, new_confd_hmac_key, new_cds,
                 cds_filename, force):
  """Renews cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type rapi_cert_filename: string
  @param rapi_cert_filename: Path to file containing new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type spice_cert_filename: string
  @param spice_cert_filename: Path to file containing new SPICE certificate
  @type spice_cacert_filename: string
  @param spice_cacert_filename: Path to file containing the certificate of the
                                CA that signed the SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type cds_filename: string
  @param cds_filename: Path to file containing new cluster domain secret
  @type force: bool
  @param force: Whether to ask user for confirmation
  @rtype: int
  @return: the desired exit code

  """
  # Option sanity checks: generating a new item and supplying one from a
  # file are mutually exclusive
  if new_rapi_cert and rapi_cert_filename:
    ToStderr("Only one of the --new-rapi-certificate and --rapi-certificate"
             " options can be specified at the same time.")
    return 1

  if new_cds and cds_filename:
    ToStderr("Only one of the --new-cluster-domain-secret and"
             " --cluster-domain-secret options can be specified at"
             " the same time.")
    return 1

  if new_spice_cert and (spice_cert_filename or spice_cacert_filename):
    ToStderr("When using --new-spice-certificate, the --spice-certificate"
             " and --spice-ca-certificate must not be used.")
    return 1

  # a supplied SPICE certificate and its CA certificate must come together
  if bool(spice_cacert_filename) ^ bool(spice_cert_filename):
    ToStderr("Both --spice-certificate and --spice-ca-certificate must be"
             " specified.")
    return 1

  # Load and syntactically verify any user-supplied PEM data up front, so
  # we fail before the cluster is stopped
  rapi_cert_pem, spice_cert_pem, spice_cacert_pem = (None, None, None)
  try:
    if rapi_cert_filename:
      rapi_cert_pem = _ReadAndVerifyCert(rapi_cert_filename, True)
    if spice_cert_filename:
      spice_cert_pem = _ReadAndVerifyCert(spice_cert_filename, True)
      spice_cacert_pem = _ReadAndVerifyCert(spice_cacert_filename)
  except errors.X509CertError, err:
    ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
    return 1

  if cds_filename:
    try:
      cds = utils.ReadFile(cds_filename)
    except Exception, err: # pylint: disable=W0703
      ToStderr("Can't load new cluster domain secret from %s: %s" %
               (cds_filename, str(err)))
      return 1
  else:
    cds = None

  if not force:
    usertext = ("This requires all daemons on all nodes to be restarted and"
                " may take some time. Continue?")
    if not AskUser(usertext):
      return 1

  # Executed by RunWhileClusterStopped below, with all daemons down; "ctx"
  # provides feedback_fn, the non-master node list and an SSH runner
  def _RenewCryptoInner(ctx):
    ctx.feedback_fn("Updating certificates and keys")
    bootstrap.GenerateClusterCrypto(new_cluster_cert,
                                    new_rapi_cert,
                                    new_spice_cert,
                                    new_confd_hmac_key,
                                    new_cds,
                                    rapi_cert_pem=rapi_cert_pem,
                                    spice_cert_pem=spice_cert_pem,
                                    spice_cacert_pem=spice_cacert_pem,
                                    cds=cds)

    # Distribute every regenerated or replaced file to the other nodes
    files_to_copy = []

    if new_cluster_cert:
      files_to_copy.append(constants.NODED_CERT_FILE)

    if new_rapi_cert or rapi_cert_pem:
      files_to_copy.append(constants.RAPI_CERT_FILE)

    if new_spice_cert or spice_cert_pem:
      files_to_copy.append(constants.SPICE_CERT_FILE)
      files_to_copy.append(constants.SPICE_CACERT_FILE)

    if new_confd_hmac_key:
      files_to_copy.append(constants.CONFD_HMAC_KEY)

    if new_cds or cds:
      files_to_copy.append(constants.CLUSTER_DOMAIN_SECRET_FILE)

    if files_to_copy:
      for node_name in ctx.nonmaster_nodes:
        ctx.feedback_fn("Copying %s to %s" %
                        (", ".join(files_to_copy), node_name))
        for file_name in files_to_copy:
          ctx.ssh.CopyFileToNode(node_name, file_name)

  RunWhileClusterStopped(ToStdout, _RenewCryptoInner)

  ToStdout("All requested certificates and keys have been replaced."
           " Running \"gnt-cluster verify\" now is recommended.")

  return 0
804

    
805

    
806
def RenewCrypto(opts, args):
  """Renews cluster certificates, keys and secrets.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list

  """
  # Thin wrapper translating command-line options into the positional
  # arguments of _RenewCrypto
  return _RenewCrypto(opts.new_cluster_cert, opts.new_rapi_cert,
                      opts.rapi_cert, opts.new_spice_cert, opts.spice_cert,
                      opts.spice_cacert, opts.new_confd_hmac_key,
                      opts.new_cluster_domain_secret,
                      opts.cluster_domain_secret, opts.force)
820

    
821

    
822
def SetClusterParams(opts, args):
  """Modify the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # Refuse to run if no modification was requested at all; the long
  # condition mirrors, one by one, every option this command accepts
  if not (not opts.lvm_storage or opts.vg_name or
          not opts.drbd_storage or opts.drbd_helper or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams or opts.nicparams or opts.ndparams or
          opts.candidate_pool_size is not None or
          opts.uid_pool is not None or
          opts.maintain_node_health is not None or
          opts.add_uids is not None or
          opts.remove_uids is not None or
          opts.default_iallocator is not None or
          opts.reserved_lvs is not None or
          opts.master_netdev is not None or
          opts.prealloc_wipe_disks is not None):
    ToStderr("Please give at least one of the parameters.")
    return 1

  vg_name = opts.vg_name
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  # An empty VG name is how the opcode encodes "disable LVM storage"
  if not opts.lvm_storage:
    vg_name = ""

  drbd_helper = opts.drbd_helper
  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    return 1

  # Likewise, an empty helper string disables DRBD storage
  if not opts.drbd_storage:
    drbd_helper = ""

  # --enabled-hypervisors is passed as a comma-separated string
  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  # a list of (name, dict) we can pass directly to dict() (or [])
  hvparams = dict(opts.hvparams)
  for hv_params in hvparams.values():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)

  # Coerce parameter dict values to their declared types before submitting
  beparams = opts.beparams
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  nicparams = opts.nicparams
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  ndparams = opts.ndparams
  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  mnh = opts.maintain_node_health

  # UID pool options take a range-list syntax, parsed by the uidpool module
  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  add_uids = opts.add_uids
  if add_uids is not None:
    add_uids = uidpool.ParseUidPool(add_uids)

  remove_uids = opts.remove_uids
  if remove_uids is not None:
    remove_uids = uidpool.ParseUidPool(remove_uids)

  # --reserved-lvs="" means "clear the list"; otherwise it is a
  # comma-separated list with escaping support
  if opts.reserved_lvs is not None:
    if opts.reserved_lvs == "":
      opts.reserved_lvs = []
    else:
      opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")

  op = opcodes.OpClusterSetParams(vg_name=vg_name,
                                  drbd_helper=drbd_helper,
                                  enabled_hypervisors=hvlist,
                                  hvparams=hvparams,
                                  os_hvp=None,
                                  beparams=beparams,
                                  nicparams=nicparams,
                                  ndparams=ndparams,
                                  candidate_pool_size=opts.candidate_pool_size,
                                  maintain_node_health=mnh,
                                  uid_pool=uid_pool,
                                  add_uids=add_uids,
                                  remove_uids=remove_uids,
                                  default_iallocator=opts.default_iallocator,
                                  prealloc_wipe_disks=opts.prealloc_wipe_disks,
                                  master_netdev=opts.master_netdev,
                                  reserved_lvs=opts.reserved_lvs)
  SubmitOpCode(op, opts=opts)
  return 0
922

    
923

    
924
def QueueOps(opts, args):
  """Queue operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  subcommand = args[0]
  client = GetClient()

  if subcommand == "drain" or subcommand == "undrain":
    # Same RPC for both; only the flag value differs
    client.SetQueueDrainFlag(subcommand == "drain")
  elif subcommand == "info":
    values = client.QueryConfigValues(["drain_flag"])
    state = "set" if values[0] else "unset"
    ToStdout("The drain flag is %s" % state)
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % subcommand,
                               errors.ECODE_INVAL)

  return 0
951

    
952

    
953
def _ShowWatcherPause(until):
  """Prints a human readable message describing the watcher pause state.

  @param until: Unix timestamp until which the watcher is paused, or None

  """
  # A pause timestamp in the past is equivalent to "not paused"
  paused = until is not None and until >= time.time()
  if paused:
    ToStdout("The watcher is paused until %s.", time.ctime(until))
  else:
    ToStdout("The watcher is not paused.")
958

    
959

    
960
def WatcherOps(opts, args):
  """Watcher operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  subcommand = args[0]
  client = GetClient()

  if subcommand == "continue":
    # Clearing the pause timestamp resumes the watcher
    client.SetWatcherPause(None)
    ToStdout("The watcher is no longer paused.")
  elif subcommand == "pause":
    if len(args) < 2:
      raise errors.OpPrereqError("Missing pause duration", errors.ECODE_INVAL)
    # Pause until "now" plus the user-supplied duration
    until = time.time() + ParseTimespec(args[1])
    _ShowWatcherPause(client.SetWatcherPause(until))
  elif subcommand == "info":
    values = client.QueryConfigValues(["watcher_pause"])
    _ShowWatcherPause(values[0])
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % subcommand,
                               errors.ECODE_INVAL)

  return 0
993

    
994

    
995
def _OobPower(opts, node_list, power):
  """Puts the node in the list to desired power state.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on
  @param power: True if they should be powered on, False otherwise
  @return: The success of the operation (none failed)

  """
  if power:
    oob_command = constants.OOB_POWER_ON
  else:
    oob_command = constants.OOB_POWER_OFF

  result = SubmitOpCode(opcodes.OpOobCommand(node_names=node_list,
                                             command=oob_command,
                                             ignore_status=True,
                                             timeout=opts.oob_timeout,
                                             power_delay=opts.power_delay),
                        opts=opts)

  # Each entry is ((?, node_name), (data_status, ?)); any non-normal
  # status counts as a failure for that node
  success = True
  for ((_, node_name), (data_status, _)) in result:
    if data_status != constants.RS_NORMAL:
      assert data_status != constants.RS_UNAVAIL
      success = False
      ToStderr("There was a problem changing power for %s, please investigate",
               node_name)

  return success
1030

    
1031

    
1032
def _InstanceStart(opts, inst_list, start):
  """Puts the instances in the list to desired state.

  @param opts: The command line options selected by the user
  @param inst_list: The list of instances to operate on
  @param start: True if they should be started, False for shutdown
  @return: The success of the operation (none failed)

  """
  if start:
    opcls = opcodes.OpInstanceStartup
    text_submit, text_success, text_failed = ("startup", "started", "starting")
  else:
    # Shutdown needs the timeout pre-bound since only instance_name is
    # supplied per-job below
    opcls = compat.partial(opcodes.OpInstanceShutdown,
                           timeout=opts.shutdown_timeout)
    text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")

  jex = JobExecutor(opts=opts)

  for instance_name in inst_list:
    ToStdout("Submit %s of instance %s", text_submit, instance_name)
    jex.QueueJob(instance_name, opcls(instance_name=instance_name))

  results = jex.GetResults()
  failures = sum(1 for (success, _) in results if not success)

  if failures:
    ToStderr("There were errors while %s instances:\n"
             "%d error(s) out of %d instance(s)", text_failed, failures,
             len(results))
    return False

  ToStdout("All instances have been %s successfully", text_success)
  return True
1068

    
1069

    
1070
class _RunWhenNodesReachableHelper:
  """Helper class to make shared internal state sharing easier.

  Used as the retry callable for L{utils.Retry}: C{__call__} runs the
  action callback on the currently reachable nodes and raises
  L{utils.RetryAgain} while some nodes are still down; C{Wait} is the
  matching wait function, which probes the down nodes instead of just
  sleeping.

  @ivar success: Indicates if all action_cb calls were successful

  """
  def __init__(self, node_list, action_cb, node2ip, port, feedback_fn,
               _ping_fn=netutils.TcpPing, _sleep_fn=time.sleep):
    """Init the object.

    @param node_list: The list of nodes to be reachable
    @param action_cb: Callback called when a new host is reachable
    @type node2ip: dict
    @param node2ip: Node to ip mapping
    @param port: The port to use for the TCP ping
    @param feedback_fn: The function used for feedback
    @param _ping_fn: Function to check reachabilty (for unittest use only)
    @param _sleep_fn: Function to sleep (for unittest use only)

    """
    # Nodes not yet seen reachable; shrinks as Wait() detects them
    self.down = set(node_list)
    # Nodes already seen reachable; grows correspondingly
    self.up = set()
    self.node2ip = node2ip
    self.success = True
    self.action_cb = action_cb
    self.port = port
    self.feedback_fn = feedback_fn
    self._ping_fn = _ping_fn
    self._sleep_fn = _sleep_fn

  def __call__(self):
    """When called we run action_cb.

    @raises utils.RetryAgain: When there are still down nodes

    """
    # A single failed callback invocation marks the whole run as failed,
    # but we keep retrying until every node has come up
    if not self.action_cb(self.up):
      self.success = False

    if self.down:
      raise utils.RetryAgain()
    else:
      return self.success

  def Wait(self, secs):
    """Checks if a host is up or waits remaining seconds.

    @param secs: The secs remaining

    """
    start = time.time()
    for node in self.down:
      if self._ping_fn(self.node2ip[node], self.port, timeout=_EPO_PING_TIMEOUT,
                       live_port_needed=True):
        self.feedback_fn("Node %s became available" % node)
        self.up.add(node)
        self.down -= self.up
        # If we have a node available there is the possibility to run the
        # action callback successfully, therefore we don't wait and return
        return

    # No node answered: sleep for whatever remains of the requested wait,
    # discounting the time already spent pinging
    self._sleep_fn(max(0.0, start + secs - time.time()))
1132

    
1133

    
1134
def _RunWhenNodesReachable(node_list, action_cb, interval):
  """Run action_cb when nodes become reachable.

  @param node_list: The list of nodes to be reachable
  @param action_cb: Callback called when a new host is reachable
  @param interval: The earliest time to retry

  """
  client = GetClient()
  cluster_info = client.QueryClusterInfo()
  if cluster_info["primary_ip_version"] == constants.IP4_VERSION:
    # NOTE(review): asymmetric with the IPv6 branch below -- presumably
    # this was meant to be netutils.IP4Address.family; verify against the
    # netutils class hierarchy
    family = netutils.IPAddress.family
  else:
    family = netutils.IP6Address.family

  # Resolve each node name once up front; the helper pings IPs directly
  node2ip = dict((node, netutils.GetHostname(node, family=family).ip)
                 for node in node_list)

  port = netutils.GetDaemonPort(constants.NODED)
  helper = _RunWhenNodesReachableHelper(node_list, action_cb, node2ip, port,
                                        ToStdout)

  try:
    return utils.Retry(helper, interval, _EPO_REACHABLE_TIMEOUT,
                       wait_fn=helper.Wait)
  except utils.RetryTimeout:
    # NOTE(review): the join separator contains no newline, so multiple
    # node names are printed on one line despite the "\n  - " lead-in;
    # confirm whether "\n  - " was intended as the separator
    ToStderr("Time exceeded while waiting for nodes to become reachable"
             " again:\n  - %s", "  - ".join(helper.down))
    return False
1163

    
1164

    
1165
def _MaybeInstanceStartup(opts, inst_map, nodes_online,
                          _instance_start_fn=_InstanceStart):
  """Start the instances conditional based on node_states.

  @param opts: The command line options selected by the user
  @param inst_map: A dict of inst -> nodes mapping
  @param nodes_online: A list of nodes online
  @param _instance_start_fn: Callback to start instances (unittest use only)
  @return: Success of the operation on all instances

  """
  # An instance is startable once every node it lives on is back online
  startable = [inst for (inst, nodes) in inst_map.items()
               if not (nodes - nodes_online)]

  # Remove them from the pending map so later calls don't retry them
  for inst in startable:
    del inst_map[inst]

  if not startable:
    return True

  return _instance_start_fn(opts, startable, True)
1189

    
1190

    
1191
def _EpoOn(opts, full_node_list, node_list, inst_map):
  """Does the actual power on.

  @param opts: The command line options selected by the user
  @param full_node_list: All nodes to operate on (includes nodes not supporting
                         OOB)
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Power the OOB-capable nodes back ON. The previous code passed False
  # here, which issued OOB_POWER_OFF and contradicted both this function's
  # purpose and the "get back up" message below.
  if node_list and not _OobPower(opts, node_list, True):
    ToStderr("Not all nodes seem to get back up, investigate and start"
             " manually if needed")

  # Wait for the nodes to be back up; a private copy of inst_map is bound
  # into the callback, which mutates it as instances get started
  action_cb = compat.partial(_MaybeInstanceStartup, opts, dict(inst_map))

  ToStdout("Waiting until all nodes are available again")
  if not _RunWhenNodesReachable(full_node_list, action_cb, _EPO_PING_INTERVAL):
    ToStderr("Please investigate and start stopped instances manually")
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS
1215

    
1216

    
1217
def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Instances must be stopped before their nodes can be powered down
  if not _InstanceStart(opts, inst_map.keys(), False):
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  # No OOB-capable nodes to power off: nothing more to do
  if not node_list:
    return constants.EXIT_SUCCESS

  if _OobPower(opts, node_list, False):
    return constants.EXIT_SUCCESS

  return constants.EXIT_FAILURE
1237

    
1238

    
1239
def Epo(opts, args):
  """EPO operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  if opts.groups and opts.show_all:
    ToStderr("Only one of --groups or --all are allowed")
    return constants.EXIT_FAILURE
  elif args and opts.show_all:
    ToStderr("Arguments in combination with --all are not allowed")
    return constants.EXIT_FAILURE

  client = GetClient()

  if opts.groups:
    # Materialize the iterator: the previous bare itertools.chain object
    # broke the "node_query_list[idx] = node" item assignment below with a
    # TypeError, since chain objects are not mutable sequences
    node_query_list = list(
      itertools.chain(*client.QueryGroups(names=args,
                                          fields=["node_list"],
                                          use_locking=False)))
  else:
    node_query_list = args

  result = client.QueryNodes(names=node_query_list,
                             fields=["name", "master", "pinst_list",
                                     "sinst_list", "powered", "offline"],
                             use_locking=False)
  node_list = []
  inst_map = {}
  for (idx, (node, master, pinsts, sinsts, powered,
             offline)) in enumerate(result):
    # Normalize the node_query_list as well
    if not opts.show_all:
      node_query_list[idx] = node
    if not offline:
      # Build inst -> set(nodes) for all instances on online nodes; nodes
      # that are the master are deliberately excluded from the sets, since
      # the master itself is never EPO-handled
      for inst in (pinsts + sinsts):
        if inst in inst_map:
          if not master:
            inst_map[inst].add(node)
        elif master:
          inst_map[inst] = set()
        else:
          inst_map[inst] = set([node])

    if master and opts.on:
      # We ignore the master for turning on the machines, in fact we are
      # already operating on the master at this point :)
      continue
    elif master and not opts.show_all:
      ToStderr("%s is the master node, please do a master-failover to another"
               " node not affected by the EPO or use --all if you intend to"
               " shutdown the whole cluster", node)
      return constants.EXIT_FAILURE
    elif powered is None:
      ToStdout("Node %s does not support out-of-band handling, it can not be"
               " handled in a fully automated manner", node)
    elif powered == opts.on:
      ToStdout("Node %s is already in desired power state, skipping", node)
    elif not offline or (offline and powered):
      node_list.append(node)

  if not opts.force and not ConfirmOperation(node_query_list, "nodes", "epo"):
    return constants.EXIT_FAILURE

  if opts.on:
    return _EpoOn(opts, node_query_list, node_list, inst_map)
  else:
    return _EpoOff(opts, node_list, inst_map)
1310

    
1311

    
1312
#: Sub-command table for gnt-cluster: maps the command name to a tuple of
#: (handler function, argument specification, accepted options, usage
#: synopsis, help text), as expected by L{GenericMain}
commands = {
  "init": (
    InitCluster, [ArgHost(min=1, max=1)],
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT,
     NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, NOMODIFY_SSH_SETUP_OPT,
     SECONDARY_IP_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
     UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
     DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT],
    "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
  "destroy": (
    DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
    "", "Destroy cluster"),
  "rename": (
    RenameCluster, [ArgHost(min=1, max=1)],
    [FORCE_OPT, DRY_RUN_OPT],
    "<new_name>",
    "Renames the cluster"),
  "redist-conf": (
    RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "", "Forces a push of the configuration file and ssconf files"
    " to the nodes in the cluster"),
  "verify": (
    VerifyCluster, ARGS_NONE,
    [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT],
    "", "Does a check on the cluster configuration"),
  "verify-disks": (
    VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
    "", "Does a check on the cluster disk status"),
  "repair-disk-sizes": (
    RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
    "", "Updates mismatches in recorded disk sizes"),
  "master-failover": (
    MasterFailover, ARGS_NONE, [NOVOTING_OPT],
    "", "Makes the current node the master"),
  "master-ping": (
    MasterPing, ARGS_NONE, [],
    "", "Checks if the master is alive"),
  "version": (
    ShowClusterVersion, ARGS_NONE, [],
    "", "Shows the cluster version"),
  "getmaster": (
    ShowClusterMaster, ARGS_NONE, [],
    "", "Shows the cluster master"),
  "copyfile": (
    ClusterCopyFile, [ArgFile(min=1, max=1)],
    [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
    "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
  "command": (
    RunClusterCommand, [ArgCommand(min=1)],
    [NODE_LIST_OPT, NODEGROUP_OPT],
    "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
  "info": (
    ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
    "[--roman]", "Show cluster configuration"),
  "list-tags": (
    ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
  "add-tags": (
    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "tag...", "Add tags to the cluster"),
  "remove-tags": (
    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "tag...", "Remove tags from the cluster"),
  "search-tags": (
    SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
    "Searches the tags on all objects on"
    " the cluster for a given pattern (regex)"),
  "queue": (
    QueueOps,
    [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
    [], "drain|undrain|info", "Change queue properties"),
  "watcher": (
    WatcherOps,
    [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
     ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
    [],
    "{pause <timespec>|continue|info}", "Change watcher properties"),
  "modify": (
    SetClusterParams, ARGS_NONE,
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
     NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
     UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT, DRBD_HELPER_OPT,
     NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT, RESERVED_LVS_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT],
    "[opts...]",
    "Alters the parameters of the cluster"),
  "renew-crypto": (
    RenewCrypto, ARGS_NONE,
    [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
     NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT,
     NEW_SPICE_CERT_OPT, SPICE_CERT_OPT, SPICE_CACERT_OPT],
    "[opts...]",
    "Renews cluster certificates, keys and secrets"),
  "epo": (
    Epo, [ArgUnknown()],
    [FORCE_OPT, ON_OPT, GROUPS_OPT, ALL_OPT, OOB_TIMEOUT_OPT,
     SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
    "[opts...] [args]",
    "Performs an emergency power-off on given args"),
  }
1415

    
1416

    
1417
#: dictionary with aliases for commands
aliases = {
  # kept for backwards compatibility with the dash-less spelling
  "masterfailover": "master-failover",
}
1421

    
1422

    
1423
def Main():
  """Entry point of the gnt-cluster script.

  Dispatches to the sub-command handlers in C{commands} (with the
  aliases from C{aliases}), tagging operations at cluster level.

  @rtype: int
  @return: the exit code produced by L{GenericMain}

  """
  return GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER},
                     aliases=aliases)