root / lib / client / gnt_cluster.py @ 3e0ed18c

#
#

# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Cluster related commands"""

# pylint: disable=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
# C0103: Invalid name gnt-cluster

import os.path
import time
import OpenSSL
import itertools

from ganeti.cli import *
from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import bootstrap
from ganeti import ssh
from ganeti import objects
from ganeti import uidpool
from ganeti import compat
from ganeti import netutils


ON_OPT = cli_option("--on", default=False,
                    action="store_true", dest="on",
                    help="Recover from an EPO")

GROUPS_OPT = cli_option("--groups", default=False,
                    action="store_true", dest="groups",
                    help="Arguments are node groups instead of nodes")

_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
_EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes


@UsesRPC
def InitCluster(opts, args):
  """Initialize the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the desired
      cluster name
  @rtype: int
  @return: the desired exit code

  """
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  vg_name = opts.vg_name
  if opts.lvm_storage and not opts.vg_name:
    vg_name = constants.DEFAULT_VG

  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    return 1

  drbd_helper = opts.drbd_helper
  if opts.drbd_storage and not opts.drbd_helper:
    drbd_helper = constants.DEFAULT_DRBD_HELPER

  master_netdev = opts.master_netdev
  if master_netdev is None:
    master_netdev = constants.DEFAULT_BRIDGE

  hvlist = opts.enabled_hypervisors
  if hvlist is None:
    hvlist = constants.DEFAULT_ENABLED_HYPERVISOR
  hvlist = hvlist.split(",")

  hvparams = dict(opts.hvparams)
  beparams = opts.beparams
  nicparams = opts.nicparams

  # prepare beparams dict
  beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  # prepare nicparams dict
  nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  # prepare ndparams dict
  if opts.ndparams is None:
    ndparams = dict(constants.NDC_DEFAULTS)
  else:
    ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams)
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  # prepare hvparams dict
  for hv in constants.HYPER_TYPES:
    if hv not in hvparams:
      hvparams[hv] = {}
    hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
    utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)

  if opts.candidate_pool_size is None:
    opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT

  if opts.mac_prefix is None:
    opts.mac_prefix = constants.DEFAULT_MAC_PREFIX

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  if opts.prealloc_wipe_disks is None:
    opts.prealloc_wipe_disks = False

  try:
    primary_ip_version = int(opts.primary_ip_version)
  except (ValueError, TypeError), err:
    ToStderr("Invalid primary ip version value: %s" % str(err))
    return 1

  bootstrap.InitCluster(cluster_name=args[0],
                        secondary_ip=opts.secondary_ip,
                        vg_name=vg_name,
                        mac_prefix=opts.mac_prefix,
                        master_netdev=master_netdev,
                        file_storage_dir=opts.file_storage_dir,
                        shared_file_storage_dir=opts.shared_file_storage_dir,
                        enabled_hypervisors=hvlist,
                        hvparams=hvparams,
                        beparams=beparams,
                        nicparams=nicparams,
                        ndparams=ndparams,
                        candidate_pool_size=opts.candidate_pool_size,
                        modify_etc_hosts=opts.modify_etc_hosts,
                        modify_ssh_setup=opts.modify_ssh_setup,
                        maintain_node_health=opts.maintain_node_health,
                        drbd_helper=drbd_helper,
                        uid_pool=uid_pool,
                        default_iallocator=opts.default_iallocator,
                        primary_ip_version=primary_ip_version,
                        prealloc_wipe_disks=opts.prealloc_wipe_disks,
                        )
  op = opcodes.OpClusterPostInit()
  SubmitOpCode(op, opts=opts)
  return 0


@UsesRPC
def DestroyCluster(opts, args):
  """Destroy the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if not opts.yes_do_it:
    ToStderr("Destroying a cluster is irreversible. If you really want"
             " to destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpClusterDestroy()
  master = SubmitOpCode(op, opts=opts)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master)
  return 0


def RenameCluster(opts, args):
  """Rename the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new cluster name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])

  new_name = args[0]
  if not opts.force:
    usertext = ("This will rename the cluster from '%s' to '%s'. If you are"
                " connected over the network to the cluster name, the"
                " operation is very dangerous as the IP address will be"
                " removed from the node and the change may not go through."
                " Continue?") % (cluster_name, new_name)
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterRename(name=new_name)
  result = SubmitOpCode(op, opts=opts, cl=cl)

  if result:
    ToStdout("Cluster renamed from '%s' to '%s'", cluster_name, result)

  return 0


def ActivateMasterIp(opts, args):
  """Activates the master IP.

  """
  op = opcodes.OpClusterActivateMasterIp()
  SubmitOpCode(op)
  return 0


def DeactivateMasterIp(opts, args):
  """Deactivates the master IP.

  """
  if not opts.confirm:
    usertext = ("This will disable the master IP. All the open connections to"
                " the master IP will be closed. To reach the master you will"
                " need to use its node IP."
                " Continue?")
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterDeactivateMasterIp()
  SubmitOpCode(op)
  return 0


def RedistributeConfig(opts, args):
  """Forces push of the cluster configuration.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: empty list
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRedistConf()
  SubmitOrSend(op, opts)
  return 0


def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  result = cl.QueryClusterInfo()
  ToStdout("Software version: %s", result["software_version"])
  ToStdout("Internode protocol: %s", result["protocol_version"])
  ToStdout("Configuration format: %s", result["config_version"])
  ToStdout("OS api version: %s", result["os_api_version"])
  ToStdout("Export interface: %s", result["export_version"])
  return 0


def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  master = bootstrap.GetMaster()
  ToStdout(master)
  return 0


def _PrintGroupedParams(paramsdict, level=1, roman=False):
  """Print Grouped parameters (be, nic, disk) by group.

  @type paramsdict: dict of dicts
  @param paramsdict: {group: {param: value, ...}, ...}
  @type level: int
  @param level: Level of indentation

  """
  indent = "  " * level
  for item, val in sorted(paramsdict.items()):
    if isinstance(val, dict):
      ToStdout("%s- %s:", indent, item)
      _PrintGroupedParams(val, level=level + 1, roman=roman)
    elif roman and isinstance(val, int):
      ToStdout("%s  %s: %s", indent, item, compat.TryToRoman(val))
    else:
      ToStdout("%s  %s: %s", indent, item, val)


def ShowClusterConfig(opts, args):
  """Shows cluster information.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  result = cl.QueryClusterInfo()

  ToStdout("Cluster name: %s", result["name"])
  ToStdout("Cluster UUID: %s", result["uuid"])

  ToStdout("Creation time: %s", utils.FormatTime(result["ctime"]))
  ToStdout("Modification time: %s", utils.FormatTime(result["mtime"]))

  ToStdout("Master node: %s", result["master"])

  ToStdout("Architecture (this node): %s (%s)",
           result["architecture"][0], result["architecture"][1])

  if result["tags"]:
    tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
  else:
    tags = "(none)"

  ToStdout("Tags: %s", tags)

  ToStdout("Default hypervisor: %s", result["default_hypervisor"])
  ToStdout("Enabled hypervisors: %s",
           utils.CommaJoin(result["enabled_hypervisors"]))

  ToStdout("Hypervisor parameters:")
  _PrintGroupedParams(result["hvparams"])

  ToStdout("OS-specific hypervisor parameters:")
  _PrintGroupedParams(result["os_hvp"])

  ToStdout("OS parameters:")
  _PrintGroupedParams(result["osparams"])

  ToStdout("Hidden OSes: %s", utils.CommaJoin(result["hidden_os"]))
  ToStdout("Blacklisted OSes: %s", utils.CommaJoin(result["blacklisted_os"]))

  ToStdout("Cluster parameters:")
  ToStdout("  - candidate pool size: %s",
            compat.TryToRoman(result["candidate_pool_size"],
                              convert=opts.roman_integers))
  ToStdout("  - master netdev: %s", result["master_netdev"])
  ToStdout("  - lvm volume group: %s", result["volume_group_name"])
  if result["reserved_lvs"]:
    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
  else:
    reserved_lvs = "(none)"
  ToStdout("  - lvm reserved volumes: %s", reserved_lvs)
  ToStdout("  - drbd usermode helper: %s", result["drbd_usermode_helper"])
  ToStdout("  - file storage path: %s", result["file_storage_dir"])
  ToStdout("  - shared file storage path: %s",
           result["shared_file_storage_dir"])
  ToStdout("  - maintenance of node health: %s",
           result["maintain_node_health"])
  ToStdout("  - uid pool: %s",
            uidpool.FormatUidPool(result["uid_pool"],
                                  roman=opts.roman_integers))
  ToStdout("  - default instance allocator: %s", result["default_iallocator"])
  ToStdout("  - primary ip version: %d", result["primary_ip_version"])
  ToStdout("  - preallocation wipe disks: %s", result["prealloc_wipe_disks"])
  ToStdout("  - OS search path: %s", utils.CommaJoin(constants.OS_SEARCH_PATH))

  ToStdout("Default node parameters:")
  _PrintGroupedParams(result["ndparams"], roman=opts.roman_integers)

  ToStdout("Default instance parameters:")
  _PrintGroupedParams(result["beparams"], roman=opts.roman_integers)

  ToStdout("Default nic parameters:")
  _PrintGroupedParams(result["nicparams"], roman=opts.roman_integers)

  return 0


def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the path of
      the file to be copied
  @rtype: int
  @return: the desired exit code

  """
  filename = args[0]
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename,
                               errors.ECODE_INVAL)

  cl = GetClient()

  cluster_name = cl.QueryConfigValues(["cluster_name"])[0]

  results = GetOnlineNodes(nodes=opts.nodes, cl=cl, filter_master=True,
                           secondary_ips=opts.use_replication_network,
                           nodegroup=opts.nodegroup)

  srun = ssh.SshRunner(cluster_name=cluster_name)
  for node in results:
    if not srun.CopyFileToNode(node, filename):
      ToStderr("Copy of file %s to node %s failed", filename, node)

  return 0


def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the command to be run and its arguments
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  command = " ".join(args)

  nodes = GetOnlineNodes(nodes=opts.nodes, cl=cl, nodegroup=opts.nodegroup)

  cluster_name, master_node = cl.QueryConfigValues(["cluster_name",
                                                    "master_node"])

  srun = ssh.SshRunner(cluster_name=cluster_name)

  # Make sure master node is at list end
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  for name in nodes:
    result = srun.Run(name, "root", command)
    ToStdout("------------------------------------------------")
    ToStdout("node: %s", name)
    ToStdout("%s", result.output)
    ToStdout("return code = %s", result.exit_code)

  return 0


def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various test on nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  skip_checks = []

  if opts.skip_nplusone_mem:
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)

  cl = GetClient()

  op = opcodes.OpClusterVerify(verbose=opts.verbose,
                               error_codes=opts.error_codes,
                               debug_simulate_errors=opts.simulate_errors,
                               skip_checks=skip_checks,
                               group_name=opts.nodegroup)
  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  results = jex.GetResults()

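  # Tally the failures: bad_jobs counts jobs that did not run successfully,
  # bad_results counts jobs whose opcode returned an unsuccessful
  # verification result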
  (bad_jobs, bad_results) = \
    map(len,
        # Convert iterators to lists
        map(list,
            # Count errors
            map(compat.partial(itertools.ifilterfalse, bool),
                # Convert result to booleans in a tuple
                zip(*((job_success, len(op_results) == 1 and op_results[0])
                      for (job_success, op_results) in results)))))

  if bad_jobs == 0 and bad_results == 0:
    rcode = constants.EXIT_SUCCESS
  else:
    rcode = constants.EXIT_FAILURE
    if bad_jobs > 0:
      ToStdout("%s job(s) failed while verifying the cluster.", bad_jobs)

  return rcode


def VerifyDisks(opts, args):
  """Verify integrity of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  op = opcodes.OpClusterVerifyDisks()

  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  retcode = constants.EXIT_SUCCESS

  for (status, result) in jex.GetResults():
    if not status:
      ToStdout("Job failed: %s", result)
      continue

    ((bad_nodes, instances, missing), ) = result

    for node, text in bad_nodes.items():
      ToStdout("Error gathering data on node %s: %s",
               node, utils.SafeEncode(text[-400:]))
      retcode = constants.EXIT_FAILURE
      ToStdout("You need to fix these nodes first before fixing instances")

    for iname in instances:
      if iname in missing:
        continue
      op = opcodes.OpInstanceActivateDisks(instance_name=iname)
      try:
        ToStdout("Activating disks for instance '%s'", iname)
        SubmitOpCode(op, opts=opts, cl=cl)
      except errors.GenericError, err:
        nret, msg = FormatError(err)
        retcode |= nret
        ToStderr("Error activating disks for instance %s: %s", iname, msg)

    if missing:
      for iname, ival in missing.iteritems():
        all_missing = compat.all(x[0] in bad_nodes for x in ival)
        if all_missing:
          ToStdout("Instance %s cannot be verified as it lives on"
                   " broken nodes", iname)
        else:
          ToStdout("Instance %s has missing logical volumes:", iname)
          ival.sort()
          for node, vol in ival:
            if node in bad_nodes:
              ToStdout("\tbroken node %s /dev/%s", node, vol)
            else:
              ToStdout("\t%s /dev/%s", node, vol)

      ToStdout("You need to replace or recreate disks for all the above"
               " instances if this message persists after fixing broken nodes.")
      retcode = constants.EXIT_FAILURE

  return retcode


def RepairDiskSizes(opts, args):
  """Verify sizes of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: optional list of instances to restrict check to
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRepairDiskSizes(instances=args)
  SubmitOpCode(op, opts=opts)


@UsesRPC
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become new
  master.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if opts.no_voting:
    usertext = ("This will perform the failover even if most other nodes"
                " are down, or if this node is outdated. This is dangerous"
                " as it can lead to a non-consistent cluster. Check the"
                " gnt-cluster(8) man page before proceeding. Continue?")
    if not AskUser(usertext):
      return 1

  return bootstrap.MasterFailover(no_voting=opts.no_voting)


def MasterPing(opts, args):
  """Checks if the master is alive.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  try:
    cl = GetClient()
    cl.QueryClusterInfo()
    return 0
  except Exception: # pylint: disable=W0703
    return 1


def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the tag pattern
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpTagsSearch(pattern=args[0])
  result = SubmitOpCode(op, opts=opts)
  if not result:
    return 1
  result = list(result)
  result.sort()
  for path, tag in result:
    ToStdout("%s %s", path, tag)


def _RenewCrypto(new_cluster_cert, new_rapi_cert, rapi_cert_filename,
                 new_confd_hmac_key, new_cds, cds_filename,
                 force):
  """Renews cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type rapi_cert_filename: string
  @param rapi_cert_filename: Path to file containing new RAPI certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type cds_filename: string
  @param cds_filename: Path to file containing new cluster domain secret
  @type force: bool
  @param force: Whether to ask user for confirmation

  """
  if new_rapi_cert and rapi_cert_filename:
    ToStderr("Only one of the --new-rapi-certificate and --rapi-certificate"
             " options can be specified at the same time.")
    return 1

  if new_cds and cds_filename:
    ToStderr("Only one of the --new-cluster-domain-secret and"
             " --cluster-domain-secret options can be specified at"
             " the same time.")
    return 1

  if rapi_cert_filename:
    # Read and verify new certificate
    try:
      rapi_cert_pem = utils.ReadFile(rapi_cert_filename)

      OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                      rapi_cert_pem)
    except Exception, err: # pylint: disable=W0703
      ToStderr("Can't load new RAPI certificate from %s: %s" %
               (rapi_cert_filename, str(err)))
      return 1

    try:
      OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, rapi_cert_pem)
    except Exception, err: # pylint: disable=W0703
      ToStderr("Can't load new RAPI private key from %s: %s" %
               (rapi_cert_filename, str(err)))
      return 1

  else:
    rapi_cert_pem = None

  if cds_filename:
    try:
      cds = utils.ReadFile(cds_filename)
    except Exception, err: # pylint: disable=W0703
      ToStderr("Can't load new cluster domain secret from %s: %s" %
               (cds_filename, str(err)))
      return 1
  else:
    cds = None

  if not force:
    usertext = ("This requires all daemons on all nodes to be restarted and"
                " may take some time. Continue?")
    if not AskUser(usertext):
      return 1

  def _RenewCryptoInner(ctx):
    ctx.feedback_fn("Updating certificates and keys")
    bootstrap.GenerateClusterCrypto(new_cluster_cert, new_rapi_cert,
                                    new_confd_hmac_key,
                                    new_cds,
                                    rapi_cert_pem=rapi_cert_pem,
                                    cds=cds)

    files_to_copy = []

    if new_cluster_cert:
      files_to_copy.append(constants.NODED_CERT_FILE)

    if new_rapi_cert or rapi_cert_pem:
      files_to_copy.append(constants.RAPI_CERT_FILE)

    if new_confd_hmac_key:
      files_to_copy.append(constants.CONFD_HMAC_KEY)

    if new_cds or cds:
      files_to_copy.append(constants.CLUSTER_DOMAIN_SECRET_FILE)

    if files_to_copy:
      for node_name in ctx.nonmaster_nodes:
        ctx.feedback_fn("Copying %s to %s" %
                        (", ".join(files_to_copy), node_name))
        for file_name in files_to_copy:
          ctx.ssh.CopyFileToNode(node_name, file_name)

  RunWhileClusterStopped(ToStdout, _RenewCryptoInner)

  ToStdout("All requested certificates and keys have been replaced."
           " Running \"gnt-cluster verify\" now is recommended.")

  return 0


def RenewCrypto(opts, args):
  """Renews cluster certificates, keys and secrets.

  """
  return _RenewCrypto(opts.new_cluster_cert,
                      opts.new_rapi_cert,
                      opts.rapi_cert,
                      opts.new_confd_hmac_key,
                      opts.new_cluster_domain_secret,
                      opts.cluster_domain_secret,
                      opts.force)


def SetClusterParams(opts, args):
  """Modify the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if not (not opts.lvm_storage or opts.vg_name or
          not opts.drbd_storage or opts.drbd_helper or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams or opts.nicparams or opts.ndparams or
          opts.candidate_pool_size is not None or
          opts.uid_pool is not None or
          opts.maintain_node_health is not None or
          opts.add_uids is not None or
          opts.remove_uids is not None or
          opts.default_iallocator is not None or
          opts.reserved_lvs is not None or
          opts.master_netdev is not None or
          opts.prealloc_wipe_disks is not None):
    ToStderr("Please give at least one of the parameters.")
    return 1

  vg_name = opts.vg_name
  if not opts.lvm_storage and opts.vg_name:
    ToStderr("Options --no-lvm-storage and --vg-name conflict.")
    return 1

  if not opts.lvm_storage:
    vg_name = ""

  drbd_helper = opts.drbd_helper
  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    return 1

  if not opts.drbd_storage:
    drbd_helper = ""

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  # a list of (name, dict) we can pass directly to dict() (or [])
  hvparams = dict(opts.hvparams)
  for hv_params in hvparams.values():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)

  beparams = opts.beparams
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  nicparams = opts.nicparams
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  ndparams = opts.ndparams
  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  mnh = opts.maintain_node_health

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  add_uids = opts.add_uids
  if add_uids is not None:
    add_uids = uidpool.ParseUidPool(add_uids)

  remove_uids = opts.remove_uids
  if remove_uids is not None:
    remove_uids = uidpool.ParseUidPool(remove_uids)

  if opts.reserved_lvs is not None:
    if opts.reserved_lvs == "":
      opts.reserved_lvs = []
    else:
      opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")

  op = opcodes.OpClusterSetParams(vg_name=vg_name,
                                  drbd_helper=drbd_helper,
                                  enabled_hypervisors=hvlist,
                                  hvparams=hvparams,
                                  os_hvp=None,
                                  beparams=beparams,
                                  nicparams=nicparams,
                                  ndparams=ndparams,
                                  candidate_pool_size=opts.candidate_pool_size,
                                  maintain_node_health=mnh,
                                  uid_pool=uid_pool,
                                  add_uids=add_uids,
                                  remove_uids=remove_uids,
                                  default_iallocator=opts.default_iallocator,
                                  prealloc_wipe_disks=opts.prealloc_wipe_disks,
                                  master_netdev=opts.master_netdev,
                                  reserved_lvs=opts.reserved_lvs)
  SubmitOpCode(op, opts=opts)
  return 0


def QueueOps(opts, args):
  """Queue operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()
  if command in ("drain", "undrain"):
    drain_flag = command == "drain"
    client.SetQueueDrainFlag(drain_flag)
  elif command == "info":
    result = client.QueryConfigValues(["drain_flag"])
    if result[0]:
      val = "set"
    else:
      val = "unset"
    ToStdout("The drain flag is %s" % val)
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0


def _ShowWatcherPause(until):
  if until is None or until < time.time():
    ToStdout("The watcher is not paused.")
  else:
    ToStdout("The watcher is paused until %s.", time.ctime(until))


def WatcherOps(opts, args):
  """Watcher operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()

  if command == "continue":
    client.SetWatcherPause(None)
    ToStdout("The watcher is no longer paused.")

  elif command == "pause":
    if len(args) < 2:
      raise errors.OpPrereqError("Missing pause duration", errors.ECODE_INVAL)

    result = client.SetWatcherPause(time.time() + ParseTimespec(args[1]))
    _ShowWatcherPause(result)

  elif command == "info":
    result = client.QueryConfigValues(["watcher_pause"])
    _ShowWatcherPause(result[0])

  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0


def _OobPower(opts, node_list, power):
  """Puts the node in the list to desired power state.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on
  @param power: True if they should be powered on, False otherwise
  @return: The success of the operation (none failed)

  """
  if power:
    command = constants.OOB_POWER_ON
  else:
    command = constants.OOB_POWER_OFF

  op = opcodes.OpOobCommand(node_names=node_list,
                            command=command,
                            ignore_status=True,
                            timeout=opts.oob_timeout,
                            power_delay=opts.power_delay)
  result = SubmitOpCode(op, opts=opts)
  errs = 0
  for node_result in result:
    (node_tuple, data_tuple) = node_result
    (_, node_name) = node_tuple
    (data_status, _) = data_tuple
    if data_status != constants.RS_NORMAL:
      assert data_status != constants.RS_UNAVAIL
      errs += 1
      ToStderr("There was a problem changing power for %s, please investigate",
               node_name)

  if errs > 0:
    return False

  return True


def _InstanceStart(opts, inst_list, start, no_remember=False):
  """Puts the instances in the list to desired state.

  @param opts: The command line options selected by the user
  @param inst_list: The list of instances to operate on
  @param start: True if they should be started, False for shutdown
  @param no_remember: If the instance state should be remembered
  @return: The success of the operation (none failed)

  """
  if start:
    opcls = opcodes.OpInstanceStartup
    text_submit, text_success, text_failed = ("startup", "started", "starting")
  else:
    opcls = compat.partial(opcodes.OpInstanceShutdown,
                           timeout=opts.shutdown_timeout,
                           no_remember=no_remember)
    text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")

  jex = JobExecutor(opts=opts)

  for inst in inst_list:
    ToStdout("Submit %s of instance %s", text_submit, inst)
    op = opcls(instance_name=inst)
    jex.QueueJob(inst, op)

  results = jex.GetResults()
  bad_cnt = len([1 for (success, _) in results if not success])

  if bad_cnt == 0:
    ToStdout("All instances have been %s successfully", text_success)
  else:
    ToStderr("There were errors while %s instances:\n"
             "%d error(s) out of %d instance(s)", text_failed, bad_cnt,
             len(results))
    return False

  return True


class _RunWhenNodesReachableHelper:
  """Helper class to make shared internal state sharing easier.

  @ivar success: Indicates if all action_cb calls were successful

  """
  def __init__(self, node_list, action_cb, node2ip, port, feedback_fn,
               _ping_fn=netutils.TcpPing, _sleep_fn=time.sleep):
    """Init the object.

    @param node_list: The list of nodes to be reachable
    @param action_cb: Callback called when a new host is reachable
    @type node2ip: dict
    @param node2ip: Node to ip mapping
    @param port: The port to use for the TCP ping
    @param feedback_fn: The function used for feedback
    @param _ping_fn: Function to check reachability (for unittest use only)
    @param _sleep_fn: Function to sleep (for unittest use only)

    """
    self.down = set(node_list)
    self.up = set()
    self.node2ip = node2ip
    self.success = True
    self.action_cb = action_cb
    self.port = port
    self.feedback_fn = feedback_fn
    self._ping_fn = _ping_fn
    self._sleep_fn = _sleep_fn

  def __call__(self):
    """When called we run action_cb.

    @raises utils.RetryAgain: When there are still down nodes

    """
    if not self.action_cb(self.up):
      self.success = False

    if self.down:
      raise utils.RetryAgain()
    else:
      return self.success

  def Wait(self, secs):
    """Checks if a host is up or waits remaining seconds.

    @param secs: The secs remaining

    """
    start = time.time()
    for node in self.down:
      if self._ping_fn(self.node2ip[node], self.port, timeout=_EPO_PING_TIMEOUT,
                       live_port_needed=True):
        self.feedback_fn("Node %s became available" % node)
        self.up.add(node)
        self.down -= self.up
        # If we have a node available there is the possibility to run the
        # action callback successfully, therefore we don't wait and return
        return

    self._sleep_fn(max(0.0, start + secs - time.time()))


def _RunWhenNodesReachable(node_list, action_cb, interval):
  """Run action_cb when nodes become reachable.

  @param node_list: The list of nodes to be reachable
  @param action_cb: Callback called when a new host is reachable
  @param interval: The earliest time to retry

  """
  client = GetClient()
  cluster_info = client.QueryClusterInfo()
  if cluster_info["primary_ip_version"] == constants.IP4_VERSION:
    family = netutils.IPAddress.family
  else:
    family = netutils.IP6Address.family

  node2ip = dict((node, netutils.GetHostname(node, family=family).ip)
                 for node in node_list)

  port = netutils.GetDaemonPort(constants.NODED)
  helper = _RunWhenNodesReachableHelper(node_list, action_cb, node2ip, port,
                                        ToStdout)

  try:
    return utils.Retry(helper, interval, _EPO_REACHABLE_TIMEOUT,
                       wait_fn=helper.Wait)
  except utils.RetryTimeout:
    ToStderr("Time exceeded while waiting for nodes to become reachable"
             " again:\n  - %s", "  - ".join(helper.down))
    return False


def _MaybeInstanceStartup(opts, inst_map, nodes_online,
                          _instance_start_fn=_InstanceStart):
  """Start the instances conditional based on node_states.

  @param opts: The command line options selected by the user
  @param inst_map: A dict of inst -> nodes mapping
  @param nodes_online: A list of nodes online
  @param _instance_start_fn: Callback to start instances (unittest use only)
  @return: Success of the operation on all instances

  """
  start_inst_list = []
  for (inst, nodes) in inst_map.items():
    if not (nodes - nodes_online):
      # All nodes the instance lives on are back online
      start_inst_list.append(inst)

  for inst in start_inst_list:
    del inst_map[inst]

  if start_inst_list:
    return _instance_start_fn(opts, start_inst_list, True)

  return True


def _EpoOn(opts, full_node_list, node_list, inst_map):
  """Does the actual power on.

  @param opts: The command line options selected by the user
  @param full_node_list: All nodes to operate on (includes nodes not supporting
                         OOB)
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  if node_list and not _OobPower(opts, node_list, True):
    ToStderr("Not all nodes seem to get back up, investigate and start"
             " manually if needed")

  # Wait for the nodes to be back up
  action_cb = compat.partial(_MaybeInstanceStartup, opts, dict(inst_map))

  ToStdout("Waiting until all nodes are available again")
  if not _RunWhenNodesReachable(full_node_list, action_cb, _EPO_PING_INTERVAL):
    ToStderr("Please investigate and start stopped instances manually")
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS


def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  if not _InstanceStart(opts, inst_map.keys(), False, no_remember=True):
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  if not node_list:
    return constants.EXIT_SUCCESS

  if _OobPower(opts, node_list, False):
    return constants.EXIT_SUCCESS
  else:
    return constants.EXIT_FAILURE


def Epo(opts, args):
  """EPO operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  if opts.groups and opts.show_all:
    ToStderr("Only one of --groups or --all is allowed")
    return constants.EXIT_FAILURE
  elif args and opts.show_all:
    ToStderr("Arguments in combination with --all are not allowed")
    return constants.EXIT_FAILURE

  client = GetClient()

  if opts.groups:
    node_query_list = itertools.chain(*client.QueryGroups(names=args,
                                                          fields=["node_list"],
                                                          use_locking=False))
  else:
    node_query_list = args

  result = client.QueryNodes(names=node_query_list,
                             fields=["name", "master", "pinst_list",
                                     "sinst_list", "powered", "offline"],
                             use_locking=False)
  node_list = []
  inst_map = {}
  for (idx, (node, master, pinsts, sinsts, powered,
             offline)) in enumerate(result):
    # Normalize the node_query_list as well
    if not opts.show_all:
      node_query_list[idx] = node
    if not offline:
      for inst in (pinsts + sinsts):
        if inst in inst_map:
          if not master:
            inst_map[inst].add(node)
        elif master:
          inst_map[inst] = set()
        else:
          inst_map[inst] = set([node])

    if master and opts.on:
      # We ignore the master for turning on the machines, in fact we are
      # already operating on the master at this point :)
      continue
    elif master and not opts.show_all:
      ToStderr("%s is the master node, please do a master-failover to another"
               " node not affected by the EPO or use --all if you intend to"
               " shutdown the whole cluster", node)
      return constants.EXIT_FAILURE
    elif powered is None:
      ToStdout("Node %s does not support out-of-band handling, it can not be"
               " handled in a fully automated manner", node)
    elif powered == opts.on:
      ToStdout("Node %s is already in desired power state, skipping", node)
    elif not offline or (offline and powered):
      node_list.append(node)

  if not opts.force and not ConfirmOperation(node_query_list, "nodes", "epo"):
    return constants.EXIT_FAILURE

  if opts.on:
    return _EpoOn(opts, node_query_list, node_list, inst_map)
  else:
    return _EpoOff(opts, node_list, inst_map)


commands = {
  "init": (
    InitCluster, [ArgHost(min=1, max=1)],
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, NIC_PARAMS_OPT,
     NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT, NOMODIFY_SSH_SETUP_OPT,
     SECONDARY_IP_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
     UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
     DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT],
    "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
  "destroy": (
    DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
    "", "Destroy cluster"),
  "rename": (
    RenameCluster, [ArgHost(min=1, max=1)],
    [FORCE_OPT, DRY_RUN_OPT],
    "<new_name>",
    "Renames the cluster"),
  "redist-conf": (
    RedistributeConfig, ARGS_NONE, [SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "", "Forces a push of the configuration file and ssconf files"
    " to the nodes in the cluster"),
  "verify": (
    VerifyCluster, ARGS_NONE,
    [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT],
    "", "Does a check on the cluster configuration"),
  "verify-disks": (
    VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
    "", "Does a check on the cluster disk status"),
  "repair-disk-sizes": (
    RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
    "[instance...]", "Updates mismatches in recorded disk sizes"),
  "master-failover": (
    MasterFailover, ARGS_NONE, [NOVOTING_OPT],
    "", "Makes the current node the master"),
  "master-ping": (
    MasterPing, ARGS_NONE, [],
    "", "Checks if the master is alive"),
  "version": (
    ShowClusterVersion, ARGS_NONE, [],
    "", "Shows the cluster version"),
  "getmaster": (
    ShowClusterMaster, ARGS_NONE, [],
    "", "Shows the cluster master"),
  "copyfile": (
    ClusterCopyFile, [ArgFile(min=1, max=1)],
    [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
    "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
  "command": (
    RunClusterCommand, [ArgCommand(min=1)],
    [NODE_LIST_OPT, NODEGROUP_OPT],
    "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
  "info": (
    ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
    "[--roman]", "Show cluster configuration"),
  "list-tags": (
    ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
  "add-tags": (
    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "tag...", "Add tags to the cluster"),
  "remove-tags": (
    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT],
    "tag...", "Remove tags from the cluster"),
  "search-tags": (
    SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
    "Searches the tags on all objects on"
    " the cluster for a given pattern (regex)"),
  "queue": (
    QueueOps,
    [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
    [], "drain|undrain|info", "Change queue properties"),
  "watcher": (
    WatcherOps,
    [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
     ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
    [],
    "{pause <timespec>|continue|info}", "Change watcher properties"),
  "modify": (
    SetClusterParams, ARGS_NONE,
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
     NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
     UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT, DRBD_HELPER_OPT,
     NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT, RESERVED_LVS_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT],
    "[opts...]",
    "Alters the parameters of the cluster"),
  "renew-crypto": (
    RenewCrypto, ARGS_NONE,
    [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
     NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT],
    "[opts...]",
    "Renews cluster certificates, keys and secrets"),
  "epo": (
    Epo, [ArgUnknown()],
    [FORCE_OPT, ON_OPT, GROUPS_OPT, ALL_OPT, OOB_TIMEOUT_OPT,
     SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
    "[opts...] [args]",
    "Performs an emergency power-off on given args"),
  "activate-master-ip": (
    ActivateMasterIp, ARGS_NONE, [], "", "Activates the master IP"),
  "deactivate-master-ip": (
    DeactivateMasterIp, ARGS_NONE, [CONFIRM_OPT], "",
    "Deactivates the master IP"),
  }


#: dictionary with aliases for commands
aliases = {
  "masterfailover": "master-failover",
}


def Main():
  return GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER},
                     aliases=aliases)
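

# A few example invocations of the commands defined above (illustrative only;
# hostnames are placeholders and the exact option set depends on the installed
# Ganeti version):
#
#   gnt-cluster init --enabled-hypervisors=kvm cluster.example.com
#   gnt-cluster verify --verbose
#   gnt-cluster command -n node1.example.com uptime
#   gnt-cluster queue info
#   gnt-cluster watcher pause 1h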