Statistics
| Branch: | Tag: | Revision:

root / lib / client / gnt_cluster.py @ 90017904

History | View | Annotate | Download (53.5 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21
"""Cluster related commands"""
22

    
23
# pylint: disable=W0401,W0613,W0614,C0103
24
# W0401: Wildcard import ganeti.cli
25
# W0613: Unused argument, since all functions follow the same API
26
# W0614: Unused import %s from wildcard import (since we need cli)
27
# C0103: Invalid name gnt-cluster
28

    
29
from cStringIO import StringIO
30
import os.path
31
import time
32
import OpenSSL
33
import itertools
34

    
35
from ganeti.cli import *
36
from ganeti import opcodes
37
from ganeti import constants
38
from ganeti import errors
39
from ganeti import utils
40
from ganeti import bootstrap
41
from ganeti import ssh
42
from ganeti import objects
43
from ganeti import uidpool
44
from ganeti import compat
45
from ganeti import netutils
46
from ganeti import pathutils
47

    
48

    
49
# Flag for "gnt-cluster epo": recover from an emergency power-off.
ON_OPT = cli_option("--on", dest="on",
                    action="store_true", default=False,
                    help="Recover from an EPO")

# Flag telling commands to treat positional arguments as node groups.
GROUPS_OPT = cli_option("--groups", dest="groups",
                        action="store_true", default=False,
                        help="Arguments are node groups instead of nodes")

# Flag skipping the interactive confirmation for --no-voting failover.
FORCE_FAILOVER = cli_option("--yes-do-it", dest="yes_do_it",
                            action="store_true", default=False,
                            help="Override interactive check for --no-voting")

_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
_EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes
64

    
65

    
66
def _CheckNoLvmStorageOptDeprecated(opts):
  """Checks if the legacy option '--no-lvm-storage' is used.

  @param opts: the command line options selected by the user
  @rtype: int
  @return: 1 if the deprecated option was given (an error message is
      printed to stderr), 0 otherwise

  """
  if not opts.lvm_storage:
    # Fixed message: "lvm-based" (was "lvm-base") and normalized spacing.
    ToStderr("The option --no-lvm-storage is no longer supported. If you want"
             " to disable lvm-based storage cluster-wide, use the option"
             " --enabled-disk-templates to disable all of these lvm-based"
             " disk templates: %s" %
             utils.CommaJoin(utils.GetLvmDiskTemplates()))
    return 1
  return 0
77

    
78

    
79
@UsesRPC
def InitCluster(opts, args):
  """Initialize the cluster.

  Validates and normalizes all command line options (volume group,
  DRBD helper, master netdev, hypervisor/backend/nic/node/disk
  parameter dicts, instance policy, ...) and then calls
  L{bootstrap.InitCluster} followed by the OpClusterPostInit opcode.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the desired
      cluster name
  @rtype: int
  @return: the desired exit code

  """
  if _CheckNoLvmStorageOptDeprecated(opts):
    return 1
  enabled_disk_templates = opts.enabled_disk_templates
  if enabled_disk_templates:
    enabled_disk_templates = enabled_disk_templates.split(",")
  else:
    enabled_disk_templates = constants.DEFAULT_ENABLED_DISK_TEMPLATES

  # Cross-check the --vg-name option against the enabled disk templates:
  # a vg without lvm templates is only a warning, lvm templates without
  # a vg is an error, and no option at all falls back to the default vg
  # when lvm templates are enabled.
  vg_name = None
  if opts.vg_name is not None:
    vg_name = opts.vg_name
    if vg_name:
      if not utils.IsLvmEnabled(enabled_disk_templates):
        ToStdout("You specified a volume group with --vg-name, but you did not"
                 " enable any disk template that uses lvm.")
    else:
      if utils.IsLvmEnabled(enabled_disk_templates):
        ToStderr("LVM disk templates are enabled, but vg name not set.")
        return 1
  else:
    if utils.IsLvmEnabled(enabled_disk_templates):
      vg_name = constants.DEFAULT_VG

  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    return 1

  drbd_helper = opts.drbd_helper
  if opts.drbd_storage and not opts.drbd_helper:
    drbd_helper = constants.DEFAULT_DRBD_HELPER

  master_netdev = opts.master_netdev
  if master_netdev is None:
    if not opts.nicparams[constants.NIC_MODE]:
      # default case, use bridging
      master_netdev = constants.DEFAULT_BRIDGE
    elif opts.nicparams[constants.NIC_MODE] == constants.NIC_MODE_OVS:
      # default ovs is different from default bridge
      master_netdev = constants.DEFAULT_OVS
      opts.nicparams[constants.NIC_LINK] = constants.DEFAULT_OVS

  hvlist = opts.enabled_hypervisors
  if hvlist is None:
    hvlist = constants.DEFAULT_ENABLED_HYPERVISOR
  hvlist = hvlist.split(",")

  hvparams = dict(opts.hvparams)
  beparams = opts.beparams
  nicparams = opts.nicparams

  diskparams = dict(opts.diskparams)

  # check the disk template types here, as we cannot rely on the type check done
  # by the opcode parameter types
  diskparams_keys = set(diskparams.keys())
  if not (diskparams_keys <= constants.DISK_TEMPLATES):
    unknown = utils.NiceSort(diskparams_keys - constants.DISK_TEMPLATES)
    ToStderr("Disk templates unknown: %s" % utils.CommaJoin(unknown))
    return 1

  # prepare beparams dict
  beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)

  # prepare nicparams dict
  nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  # prepare ndparams dict
  if opts.ndparams is None:
    ndparams = dict(constants.NDC_DEFAULTS)
  else:
    ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams)
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  # prepare hvparams dict: fill every known hypervisor with its defaults,
  # overlaid by whatever the user passed on the command line
  for hv in constants.HYPER_TYPES:
    if hv not in hvparams:
      hvparams[hv] = {}
    hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
    utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)

  # prepare diskparams dict, same defaults-overlay scheme as hvparams
  for templ in constants.DISK_TEMPLATES:
    if templ not in diskparams:
      diskparams[templ] = {}
    diskparams[templ] = objects.FillDict(constants.DISK_DT_DEFAULTS[templ],
                                         diskparams[templ])
    utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)

  # prepare ipolicy dict
  ipolicy = CreateIPolicyFromOpts(
    ispecs_mem_size=opts.ispecs_mem_size,
    ispecs_cpu_count=opts.ispecs_cpu_count,
    ispecs_disk_count=opts.ispecs_disk_count,
    ispecs_disk_size=opts.ispecs_disk_size,
    ispecs_nic_count=opts.ispecs_nic_count,
    minmax_ispecs=opts.ipolicy_bounds_specs,
    std_ispecs=opts.ipolicy_std_specs,
    ipolicy_disk_templates=opts.ipolicy_disk_templates,
    ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
    fill_all=True)

  if opts.candidate_pool_size is None:
    opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT

  if opts.mac_prefix is None:
    opts.mac_prefix = constants.DEFAULT_MAC_PREFIX

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  if opts.prealloc_wipe_disks is None:
    opts.prealloc_wipe_disks = False

  external_ip_setup_script = opts.use_external_mip_script
  if external_ip_setup_script is None:
    external_ip_setup_script = False

  try:
    primary_ip_version = int(opts.primary_ip_version)
  except (ValueError, TypeError), err:
    ToStderr("Invalid primary ip version value: %s" % str(err))
    return 1

  master_netmask = opts.master_netmask
  try:
    if master_netmask is not None:
      master_netmask = int(master_netmask)
  except (ValueError, TypeError), err:
    ToStderr("Invalid master netmask value: %s" % str(err))
    return 1

  if opts.disk_state:
    disk_state = utils.FlatToDict(opts.disk_state)
  else:
    disk_state = {}

  hv_state = dict(opts.hv_state)

  bootstrap.InitCluster(cluster_name=args[0],
                        secondary_ip=opts.secondary_ip,
                        vg_name=vg_name,
                        mac_prefix=opts.mac_prefix,
                        master_netmask=master_netmask,
                        master_netdev=master_netdev,
                        file_storage_dir=opts.file_storage_dir,
                        shared_file_storage_dir=opts.shared_file_storage_dir,
                        enabled_hypervisors=hvlist,
                        hvparams=hvparams,
                        beparams=beparams,
                        nicparams=nicparams,
                        ndparams=ndparams,
                        diskparams=diskparams,
                        ipolicy=ipolicy,
                        candidate_pool_size=opts.candidate_pool_size,
                        modify_etc_hosts=opts.modify_etc_hosts,
                        modify_ssh_setup=opts.modify_ssh_setup,
                        maintain_node_health=opts.maintain_node_health,
                        drbd_helper=drbd_helper,
                        uid_pool=uid_pool,
                        default_iallocator=opts.default_iallocator,
                        primary_ip_version=primary_ip_version,
                        prealloc_wipe_disks=opts.prealloc_wipe_disks,
                        use_external_mip_script=external_ip_setup_script,
                        hv_state=hv_state,
                        disk_state=disk_state,
                        enabled_disk_templates=enabled_disk_templates,
                        )
  op = opcodes.OpClusterPostInit()
  SubmitOpCode(op, opts=opts)
  return 0
265

    
266

    
267
@UsesRPC
def DestroyCluster(opts, args):
  """Destroy the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if not opts.yes_do_it:
    # Destroying the cluster cannot be undone, so require an explicit
    # --yes-do-it instead of an interactive prompt.
    ToStderr("Destroying a cluster is irreversible. If you really want"
             " to destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpClusterDestroy()
  master_uuid = SubmitOpCode(op, opts=opts)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master_uuid)
  return 0
289

    
290

    
291
def RenameCluster(opts, args):
  """Rename the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new cluster name
  @rtype: int
  @return: the desired exit code

  """
  client = GetClient()

  (old_name, ) = client.QueryConfigValues(["cluster_name"])
  new_name = args[0]

  if not opts.force:
    # Warn the operator: the rename removes the cluster IP from the
    # master node, which can cut off a connection made via that name.
    warning = ("This will rename the cluster from '%s' to '%s'. If you are"
               " connected over the network to the cluster name, the"
               " operation is very dangerous as the IP address will be"
               " removed from the node and the change may not go through."
               " Continue?") % (old_name, new_name)
    if not AskUser(warning):
      return 1

  rename_op = opcodes.OpClusterRename(name=new_name)
  result = SubmitOpCode(rename_op, opts=opts, cl=client)

  if result:
    ToStdout("Cluster renamed from '%s' to '%s'", old_name, result)

  return 0
322

    
323

    
324
def ActivateMasterIp(opts, args):
  """Activates the master IP.

  """
  # Simple fire-and-forget opcode; success is signalled by exit code 0.
  SubmitOpCode(opcodes.OpClusterActivateMasterIp())
  return 0
331

    
332

    
333
def DeactivateMasterIp(opts, args):
  """Deactivates the master IP.

  """
  if not opts.confirm:
    # Taking down the master IP drops existing client connections, so
    # ask before proceeding unless --yes was given.
    prompt = ("This will disable the master IP. All the open connections to"
              " the master IP will be closed. To reach the master you will"
              " need to use its node IP."
              " Continue?")
    if not AskUser(prompt):
      return 1

  SubmitOpCode(opcodes.OpClusterDeactivateMasterIp())
  return 0
348

    
349

    
350
def RedistributeConfig(opts, args):
  """Forces push of the cluster configuration.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: empty list
  @rtype: int
  @return: the desired exit code

  """
  # SubmitOrSend honours --submit, unlike plain SubmitOpCode.
  SubmitOrSend(opcodes.OpClusterRedistConf(), opts)
  return 0
363

    
364

    
365
def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  info = GetClient(query=True).QueryClusterInfo()
  # Table-driven output: one line per (label, info key) pair.
  for (label, key) in [("Software version", "software_version"),
                       ("Internode protocol", "protocol_version"),
                       ("Configuration format", "config_version"),
                       ("OS api version", "os_api_version"),
                       ("Export interface", "export_version")]:
    ToStdout("%s: %s" % (label, info[key]))
  return 0
383

    
384

    
385
def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  ToStdout(bootstrap.GetMaster())
  return 0
398

    
399

    
400
def _FormatGroupedParams(paramsdict, roman=False):
401
  """Format Grouped parameters (be, nic, disk) by group.
402

403
  @type paramsdict: dict of dicts
404
  @param paramsdict: {group: {param: value, ...}, ...}
405
  @rtype: dict of dicts
406
  @return: copy of the input dictionaries with strings as values
407

408
  """
409
  ret = {}
410
  for (item, val) in paramsdict.items():
411
    if isinstance(val, dict):
412
      ret[item] = _FormatGroupedParams(val, roman=roman)
413
    elif roman and isinstance(val, int):
414
      ret[item] = compat.TryToRoman(val)
415
    else:
416
      ret[item] = str(val)
417
  return ret
418

    
419

    
420
def ShowClusterConfig(opts, args):
  """Shows cluster information.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient(query=True)
  result = cl.QueryClusterInfo()

  if result["tags"]:
    tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
  else:
    tags = "(none)"
  if result["reserved_lvs"]:
    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
  else:
    reserved_lvs = "(none)"

  # only display parameters of hypervisors that are actually enabled
  enabled_hv = result["enabled_hypervisors"]
  hvparams = dict((k, v) for k, v in result["hvparams"].iteritems()
                  if k in enabled_hv)

  # (label, value) pairs rendered by PrintGenericInfo below; values may
  # themselves be nested lists/dicts
  info = [
    ("Cluster name", result["name"]),
    ("Cluster UUID", result["uuid"]),

    ("Creation time", utils.FormatTime(result["ctime"])),
    ("Modification time", utils.FormatTime(result["mtime"])),

    ("Master node", result["master"]),

    ("Architecture (this node)",
     "%s (%s)" % (result["architecture"][0], result["architecture"][1])),

    ("Tags", tags),

    ("Default hypervisor", result["default_hypervisor"]),
    ("Enabled hypervisors", utils.CommaJoin(enabled_hv)),

    ("Hypervisor parameters", _FormatGroupedParams(hvparams)),

    ("OS-specific hypervisor parameters",
     _FormatGroupedParams(result["os_hvp"])),

    ("OS parameters", _FormatGroupedParams(result["osparams"])),

    ("Hidden OSes", utils.CommaJoin(result["hidden_os"])),
    ("Blacklisted OSes", utils.CommaJoin(result["blacklisted_os"])),

    ("Cluster parameters", [
      ("candidate pool size",
       compat.TryToRoman(result["candidate_pool_size"],
                         convert=opts.roman_integers)),
      ("master netdev", result["master_netdev"]),
      ("master netmask", result["master_netmask"]),
      ("use external master IP address setup script",
       result["use_external_mip_script"]),
      ("lvm volume group", result["volume_group_name"]),
      ("lvm reserved volumes", reserved_lvs),
      ("drbd usermode helper", result["drbd_usermode_helper"]),
      ("file storage path", result["file_storage_dir"]),
      ("shared file storage path", result["shared_file_storage_dir"]),
      ("maintenance of node health", result["maintain_node_health"]),
      ("uid pool", uidpool.FormatUidPool(result["uid_pool"])),
      ("default instance allocator", result["default_iallocator"]),
      ("primary ip version", result["primary_ip_version"]),
      ("preallocation wipe disks", result["prealloc_wipe_disks"]),
      ("OS search path", utils.CommaJoin(pathutils.OS_SEARCH_PATH)),
      ("ExtStorage Providers search path",
       utils.CommaJoin(pathutils.ES_SEARCH_PATH)),
      ("enabled disk templates",
       utils.CommaJoin(result["enabled_disk_templates"])),
      ]),

    ("Default node parameters",
     _FormatGroupedParams(result["ndparams"], roman=opts.roman_integers)),

    ("Default instance parameters",
     _FormatGroupedParams(result["beparams"], roman=opts.roman_integers)),

    ("Default nic parameters",
     _FormatGroupedParams(result["nicparams"], roman=opts.roman_integers)),

    ("Default disk parameters",
     _FormatGroupedParams(result["diskparams"], roman=opts.roman_integers)),

    ("Instance policy - limits for instances",
     FormatPolicyInfo(result["ipolicy"], None, True)),
    ]

  PrintGenericInfo(info)
  return 0
516

    
517

    
518
def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the path of
      the file to be copied
  @rtype: int
  @return: the desired exit code

  """
  filename = args[0]
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename,
                               errors.ECODE_INVAL)

  client = GetClient()
  (cluster_name, ) = client.QueryConfigValues(["cluster_name"])

  # The master itself is excluded: it already has the file.
  nodes = GetOnlineNodes(nodes=opts.nodes, cl=client, filter_master=True,
                         secondary_ips=opts.use_replication_network,
                         nodegroup=opts.nodegroup)

  runner = ssh.SshRunner(cluster_name)
  for node in nodes:
    # Report failures but keep going so every node gets an attempt.
    if not runner.CopyFileToNode(node, filename):
      ToStderr("Copy of file %s to node %s failed", filename, node)

  return 0
548

    
549

    
550
def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the command to be run and its arguments
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  command = " ".join(args)

  nodes = GetOnlineNodes(nodes=opts.nodes, cl=cl, nodegroup=opts.nodegroup)

  cluster_name, master_node = cl.QueryConfigValues(["cluster_name",
                                                    "master_node"])

  srun = ssh.SshRunner(cluster_name=cluster_name)

  # Make sure master node is at list end
  # NOTE(review): presumably so a disruptive command reaches the master
  # only after all other nodes have been handled -- confirm
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  for name in nodes:
    result = srun.Run(name, constants.SSH_LOGIN_USER, command)

    if opts.failure_only and result.exit_code == constants.EXIT_SUCCESS:
      # Do not output anything for successful commands
      continue

    ToStdout("------------------------------------------------")
    if opts.show_machine_names:
      # prefix every output line with the node name
      for line in result.output.splitlines():
        ToStdout("%s: %s", name, line)
    else:
      ToStdout("node: %s", name)
      ToStdout("%s", result.output)
    ToStdout("return code = %s", result.exit_code)

  return 0
593

    
594

    
595
def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various test on nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  skip_checks = []

  if opts.skip_nplusone_mem:
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)

  cl = GetClient()

  op = opcodes.OpClusterVerify(verbose=opts.verbose,
                               error_codes=opts.error_codes,
                               debug_simulate_errors=opts.simulate_errors,
                               skip_checks=skip_checks,
                               ignore_errors=opts.ignore_errors,
                               group_name=opts.nodegroup)
  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  results = jex.GetResults()

  # Count, respectively, how many jobs failed outright and how many
  # returned an unsuccessful verification result. The zip(*...) splits
  # the (job_success, op_result) pairs into two parallel sequences,
  # ifilterfalse(bool, ...) keeps only the falsy entries, and len()
  # counts them.
  (bad_jobs, bad_results) = \
    map(len,
        # Convert iterators to lists
        map(list,
            # Count errors
            map(compat.partial(itertools.ifilterfalse, bool),
                # Convert result to booleans in a tuple
                zip(*((job_success, len(op_results) == 1 and op_results[0])
                      for (job_success, op_results) in results)))))

  if bad_jobs == 0 and bad_results == 0:
    rcode = constants.EXIT_SUCCESS
  else:
    rcode = constants.EXIT_FAILURE
    if bad_jobs > 0:
      ToStdout("%s job(s) failed while verifying the cluster.", bad_jobs)

  return rcode
646

    
647

    
648
def VerifyDisks(opts, args):
  """Verify integrity of cluster disks.

  Submits an OpClusterVerifyDisks, waits for the resulting per-group
  jobs, reports broken nodes and missing logical volumes, and tries to
  (re-)activate disks for instances whose disks are merely inactive.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  op = opcodes.OpClusterVerifyDisks()

  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  retcode = constants.EXIT_SUCCESS

  for (status, result) in jex.GetResults():
    if not status:
      ToStdout("Job failed: %s", result)
      continue

    # each job returns a single (bad_nodes, instances, missing) triple
    ((bad_nodes, instances, missing), ) = result

    for node, text in bad_nodes.items():
      ToStdout("Error gathering data on node %s: %s",
               node, utils.SafeEncode(text[-400:]))
      retcode = constants.EXIT_FAILURE
      ToStdout("You need to fix these nodes first before fixing instances")

    # instances with inactive (but present) disks: try to activate them
    for iname in instances:
      if iname in missing:
        continue
      op = opcodes.OpInstanceActivateDisks(instance_name=iname)
      try:
        ToStdout("Activating disks for instance '%s'", iname)
        SubmitOpCode(op, opts=opts, cl=cl)
      except errors.GenericError, err:
        nret, msg = FormatError(err)
        retcode |= nret
        ToStderr("Error activating disks for instance %s: %s", iname, msg)

    if missing:
      # instances with genuinely missing logical volumes
      for iname, ival in missing.iteritems():
        all_missing = compat.all(x[0] in bad_nodes for x in ival)
        if all_missing:
          ToStdout("Instance %s cannot be verified as it lives on"
                   " broken nodes", iname)
        else:
          ToStdout("Instance %s has missing logical volumes:", iname)
          ival.sort()
          for node, vol in ival:
            if node in bad_nodes:
              ToStdout("\tbroken node %s /dev/%s", node, vol)
            else:
              ToStdout("\t%s /dev/%s", node, vol)

      ToStdout("You need to replace or recreate disks for all the above"
               " instances if this message persists after fixing broken nodes.")
      retcode = constants.EXIT_FAILURE
    elif not instances:
      ToStdout("No disks need to be activated.")

  return retcode
719

    
720

    
721
def RepairDiskSizes(opts, args):
  """Verify sizes of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: optional list of instances to restrict check to
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRepairDiskSizes(instances=args)
  SubmitOpCode(op, opts=opts)
  # Explicit success exit code; the docstring promises an int but the
  # original implementation fell off the end returning None.
  return 0
733

    
734

    
735
@UsesRPC
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become new
  master.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if opts.no_voting and not opts.yes_do_it:
    # --no-voting can split the cluster; require either --yes-do-it or
    # an explicit interactive confirmation.
    prompt = ("This will perform the failover even if most other nodes"
              " are down, or if this node is outdated. This is dangerous"
              " as it can lead to a non-consistent cluster. Check the"
              " gnt-cluster(8) man page before proceeding. Continue?")
    if not AskUser(prompt):
      return 1

  return bootstrap.MasterFailover(no_voting=opts.no_voting)
759

    
760

    
761
def MasterPing(opts, args):
  """Checks if the master is alive.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # Any failure while contacting the master (connection refused,
  # timeout, ...) simply means "not alive", hence the broad catch.
  try:
    GetClient().QueryClusterInfo()
  except Exception: # pylint: disable=W0703
    return 1
  return 0
777

    
778

    
779
def SearchTags(opts, args):
  """Searches the tags on all the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the tag pattern
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpTagsSearch(pattern=args[0])
  result = SubmitOpCode(op, opts=opts)
  if not result:
    return 1
  # sorted() replaces the list()+sort() pair; output order is unchanged
  for path, tag in sorted(result):
    ToStdout("%s %s", path, tag)
  # Explicit success exit code; previously the function returned None
  # on the success path while returning 1 on failure.
  return 0
797

    
798

    
799
def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
  """Reads and verifies an X509 certificate.

  @type cert_filename: string
  @param cert_filename: the path of the file containing the certificate to
                        verify encoded in PEM format
  @type verify_private_key: bool
  @param verify_private_key: whether to verify the private key in addition to
                             the public certificate
  @rtype: string
  @return: a string containing the PEM-encoded certificate.
  @raise errors.X509CertError: if the file cannot be read or its contents
                               do not parse as a certificate (or, when
                               requested, as a private key)

  """
  try:
    pem = utils.ReadFile(cert_filename)
  except IOError, err:
    raise errors.X509CertError(cert_filename,
                               "Unable to read certificate: %s" % str(err))

  # load_certificate only parses/validates; the loaded object is discarded
  try:
    OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
  except Exception, err:
    raise errors.X509CertError(cert_filename,
                               "Unable to load certificate: %s" % str(err))

  if verify_private_key:
    # the key is expected to live in the same PEM file as the certificate
    try:
      OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem)
    except Exception, err:
      raise errors.X509CertError(cert_filename,
                                 "Unable to load private key: %s" % str(err))

  return pem
832

    
833

    
834
def _RenewCrypto(new_cluster_cert, new_rapi_cert, # pylint: disable=R0911
                 rapi_cert_filename, new_spice_cert, spice_cert_filename,
                 spice_cacert_filename, new_confd_hmac_key, new_cds,
                 cds_filename, force):
  """Renews cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type rapi_cert_filename: string
  @param rapi_cert_filename: Path to file containing new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type spice_cert_filename: string
  @param spice_cert_filename: Path to file containing new SPICE certificate
  @type spice_cacert_filename: string
  @param spice_cacert_filename: Path to file containing the certificate of the
                                CA that signed the SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type cds_filename: string
  @param cds_filename: Path to file containing new cluster domain secret
  @type force: bool
  @param force: Whether to ask user for confirmation
  @rtype: int
  @return: the desired exit code (0 on success, 1 on error)

  """
  # "Generate new" and "load from file" are mutually exclusive for each
  # credential; reject contradictory combinations up front
  if new_rapi_cert and rapi_cert_filename:
    ToStderr("Only one of the --new-rapi-certificate and --rapi-certificate"
             " options can be specified at the same time.")
    return 1

  if new_cds and cds_filename:
    ToStderr("Only one of the --new-cluster-domain-secret and"
             " --cluster-domain-secret options can be specified at"
             " the same time.")
    return 1

  if new_spice_cert and (spice_cert_filename or spice_cacert_filename):
    ToStderr("When using --new-spice-certificate, the --spice-certificate"
             " and --spice-ca-certificate must not be used.")
    return 1

  # XOR: the SPICE cert and its CA cert must be given together or not at all
  if bool(spice_cacert_filename) ^ bool(spice_cert_filename):
    ToStderr("Both --spice-certificate and --spice-ca-certificate must be"
             " specified.")
    return 1

  # Load and validate any user-supplied PEM material before touching the
  # cluster; _ReadAndVerifyCert raises X509CertError on bad input
  rapi_cert_pem, spice_cert_pem, spice_cacert_pem = (None, None, None)
  try:
    if rapi_cert_filename:
      rapi_cert_pem = _ReadAndVerifyCert(rapi_cert_filename, True)
    if spice_cert_filename:
      spice_cert_pem = _ReadAndVerifyCert(spice_cert_filename, True)
      spice_cacert_pem = _ReadAndVerifyCert(spice_cacert_filename)
  except errors.X509CertError, err:
    ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
    return 1

  if cds_filename:
    try:
      cds = utils.ReadFile(cds_filename)
    except Exception, err: # pylint: disable=W0703
      ToStderr("Can't load new cluster domain secret from %s: %s" %
               (cds_filename, str(err)))
      return 1
  else:
    cds = None

  if not force:
    usertext = ("This requires all daemons on all nodes to be restarted and"
                " may take some time. Continue?")
    if not AskUser(usertext):
      return 1

  def _RenewCryptoInner(ctx):
    # Runs with the cluster daemons stopped (see RunWhileClusterStopped
    # below); regenerates crypto material on the master and pushes the
    # affected files to all non-master nodes
    ctx.feedback_fn("Updating certificates and keys")
    bootstrap.GenerateClusterCrypto(new_cluster_cert,
                                    new_rapi_cert,
                                    new_spice_cert,
                                    new_confd_hmac_key,
                                    new_cds,
                                    rapi_cert_pem=rapi_cert_pem,
                                    spice_cert_pem=spice_cert_pem,
                                    spice_cacert_pem=spice_cacert_pem,
                                    cds=cds)

    # Only files that were actually (re)generated or replaced need to be
    # distributed to the other nodes
    files_to_copy = []

    if new_cluster_cert:
      files_to_copy.append(pathutils.NODED_CERT_FILE)

    if new_rapi_cert or rapi_cert_pem:
      files_to_copy.append(pathutils.RAPI_CERT_FILE)

    if new_spice_cert or spice_cert_pem:
      files_to_copy.append(pathutils.SPICE_CERT_FILE)
      files_to_copy.append(pathutils.SPICE_CACERT_FILE)

    if new_confd_hmac_key:
      files_to_copy.append(pathutils.CONFD_HMAC_KEY)

    if new_cds or cds:
      files_to_copy.append(pathutils.CLUSTER_DOMAIN_SECRET_FILE)

    if files_to_copy:
      for node_name in ctx.nonmaster_nodes:
        ctx.feedback_fn("Copying %s to %s" %
                        (", ".join(files_to_copy), node_name))
        for file_name in files_to_copy:
          ctx.ssh.CopyFileToNode(node_name, file_name)

  RunWhileClusterStopped(ToStdout, _RenewCryptoInner)

  ToStdout("All requested certificates and keys have been replaced."
           " Running \"gnt-cluster verify\" now is recommended.")

  return 0
def RenewCrypto(opts, args):
  """Renews cluster certificates, keys and secrets.

  Thin CLI entry point which forwards the relevant command line options
  to L{_RenewCrypto}.

  """
  return _RenewCrypto(opts.new_cluster_cert, opts.new_rapi_cert,
                      opts.rapi_cert, opts.new_spice_cert,
                      opts.spice_cert, opts.spice_cacert,
                      opts.new_confd_hmac_key,
                      opts.new_cluster_domain_secret,
                      opts.cluster_domain_secret, opts.force)
def SetClusterParams(opts, args):
  """Modify the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  # Require at least one modification option; each term mirrors one of the
  # command line options accepted by "gnt-cluster modify"
  if not (opts.vg_name is not None or opts.drbd_helper or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams or opts.nicparams or
          opts.ndparams or opts.diskparams or
          opts.candidate_pool_size is not None or
          opts.uid_pool is not None or
          opts.maintain_node_health is not None or
          opts.add_uids is not None or
          opts.remove_uids is not None or
          opts.default_iallocator is not None or
          opts.reserved_lvs is not None or
          opts.master_netdev is not None or
          opts.master_netmask is not None or
          opts.use_external_mip_script is not None or
          opts.prealloc_wipe_disks is not None or
          opts.hv_state or
          opts.enabled_disk_templates or
          opts.disk_state or
          opts.ipolicy_bounds_specs is not None or
          opts.ipolicy_std_specs is not None or
          opts.ipolicy_disk_templates is not None or
          opts.ipolicy_vcpu_ratio is not None or
          opts.ipolicy_spindle_ratio is not None or
          opts.modify_etc_hosts is not None):
    ToStderr("Please give at least one of the parameters.")
    return 1

  if _CheckNoLvmStorageOptDeprecated(opts):
    return 1

  enabled_disk_templates = None
  if opts.enabled_disk_templates:
    enabled_disk_templates = opts.enabled_disk_templates.split(",")

  # consistency between vg name and enabled disk templates
  vg_name = None
  if opts.vg_name is not None:
    vg_name = opts.vg_name
  if enabled_disk_templates:
    if vg_name and not utils.IsLvmEnabled(enabled_disk_templates):
      # warning only, not an error: the setting is accepted anyway
      ToStdout("You specified a volume group with --vg-name, but you did not"
               " enable any of the following lvm-based disk templates: %s" %
               utils.CommaJoin(utils.GetLvmDiskTemplates()))

  drbd_helper = opts.drbd_helper
  if not opts.drbd_storage and opts.drbd_helper:
    ToStderr("Options --no-drbd-storage and --drbd-usermode-helper conflict.")
    return 1

  # empty string (as opposed to None) signals "disable the DRBD helper"
  if not opts.drbd_storage:
    drbd_helper = ""

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  # a list of (name, dict) we can pass directly to dict() (or [])
  hvparams = dict(opts.hvparams)
  for hv_params in hvparams.values():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)

  diskparams = dict(opts.diskparams)

  for dt_params in diskparams.values():
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)

  beparams = opts.beparams
  utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)

  nicparams = opts.nicparams
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  ndparams = opts.ndparams
  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  ipolicy = CreateIPolicyFromOpts(
    minmax_ispecs=opts.ipolicy_bounds_specs,
    std_ispecs=opts.ipolicy_std_specs,
    ipolicy_disk_templates=opts.ipolicy_disk_templates,
    ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
    )

  mnh = opts.maintain_node_health

  # uid pool options are parsed from their string form into ranges
  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  add_uids = opts.add_uids
  if add_uids is not None:
    add_uids = uidpool.ParseUidPool(add_uids)

  remove_uids = opts.remove_uids
  if remove_uids is not None:
    remove_uids = uidpool.ParseUidPool(remove_uids)

  # an explicit empty string clears the reserved LV list
  if opts.reserved_lvs is not None:
    if opts.reserved_lvs == "":
      opts.reserved_lvs = []
    else:
      opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")

  if opts.master_netmask is not None:
    try:
      opts.master_netmask = int(opts.master_netmask)
    except ValueError:
      ToStderr("The --master-netmask option expects an int parameter.")
      return 1

  ext_ip_script = opts.use_external_mip_script

  if opts.disk_state:
    disk_state = utils.FlatToDict(opts.disk_state)
  else:
    disk_state = {}

  hv_state = dict(opts.hv_state)

  op = opcodes.OpClusterSetParams(
    vg_name=vg_name,
    drbd_helper=drbd_helper,
    enabled_hypervisors=hvlist,
    hvparams=hvparams,
    os_hvp=None,
    beparams=beparams,
    nicparams=nicparams,
    ndparams=ndparams,
    diskparams=diskparams,
    ipolicy=ipolicy,
    candidate_pool_size=opts.candidate_pool_size,
    maintain_node_health=mnh,
    modify_etc_hosts=opts.modify_etc_hosts,
    uid_pool=uid_pool,
    add_uids=add_uids,
    remove_uids=remove_uids,
    default_iallocator=opts.default_iallocator,
    prealloc_wipe_disks=opts.prealloc_wipe_disks,
    master_netdev=opts.master_netdev,
    master_netmask=opts.master_netmask,
    reserved_lvs=opts.reserved_lvs,
    use_external_mip_script=ext_ip_script,
    hv_state=hv_state,
    disk_state=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    force=opts.force,
    )
  SubmitOrSend(op, opts)
  return 0
def QueueOps(opts, args):
  """Queue operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()

  if command == "drain" or command == "undrain":
    # "drain" sets the flag, "undrain" clears it
    client.SetQueueDrainFlag(command == "drain")
  elif command == "info":
    result = client.QueryConfigValues(["drain_flag"])
    val = "set" if result[0] else "unset"
    ToStdout("The drain flag is %s" % val)
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
def _ShowWatcherPause(until):
  """Prints whether (and until when) the watcher is paused."""
  if until is not None and until >= time.time():
    ToStdout("The watcher is paused until %s.", time.ctime(until))
  else:
    ToStdout("The watcher is not paused.")
def WatcherOps(opts, args):
  """Watcher operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()

  if command == "pause":
    if len(args) < 2:
      raise errors.OpPrereqError("Missing pause duration", errors.ECODE_INVAL)
    # the pause end is an absolute timestamp: now + requested duration
    _ShowWatcherPause(client.SetWatcherPause(time.time() +
                                             ParseTimespec(args[1])))
  elif command == "continue":
    client.SetWatcherPause(None)
    ToStdout("The watcher is no longer paused.")
  elif command == "info":
    _ShowWatcherPause(client.QueryConfigValues(["watcher_pause"])[0])
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
def _OobPower(opts, node_list, power):
  """Puts the node in the list to desired power state.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on
  @param power: True if they should be powered on, False otherwise
  @return: The success of the operation (none failed)

  """
  command = constants.OOB_POWER_ON if power else constants.OOB_POWER_OFF

  op = opcodes.OpOobCommand(node_names=node_list,
                            command=command,
                            ignore_status=True,
                            timeout=opts.oob_timeout,
                            power_delay=opts.power_delay)

  errs = 0
  for ((_, node_name), (data_status, _)) in SubmitOpCode(op, opts=opts):
    if data_status != constants.RS_NORMAL:
      assert data_status != constants.RS_UNAVAIL
      errs += 1
      ToStderr("There was a problem changing power for %s, please investigate",
               node_name)

  return errs == 0
def _InstanceStart(opts, inst_list, start, no_remember=False):
  """Puts the instances in the list to desired state.

  @param opts: The command line options selected by the user
  @param inst_list: The list of instances to operate on
  @param start: True if they should be started, False for shutdown
  @param no_remember: If the instance state should be remembered
  @return: The success of the operation (none failed)

  """
  if start:
    opcls = opcodes.OpInstanceStartup
    (text_submit, text_success, text_failed) = \
      ("startup", "started", "starting")
  else:
    opcls = compat.partial(opcodes.OpInstanceShutdown,
                           timeout=opts.shutdown_timeout,
                           no_remember=no_remember)
    (text_submit, text_success, text_failed) = \
      ("shutdown", "stopped", "stopping")

  jex = JobExecutor(opts=opts)
  for inst in inst_list:
    ToStdout("Submit %s of instance %s", text_submit, inst)
    jex.QueueJob(inst, opcls(instance_name=inst))

  results = jex.GetResults()
  bad_cnt = sum(1 for (success, _) in results if not success)

  if bad_cnt:
    ToStderr("There were errors while %s instances:\n"
             "%d error(s) out of %d instance(s)", text_failed, bad_cnt,
             len(results))
    return False

  ToStdout("All instances have been %s successfully", text_success)
  return True
class _RunWhenNodesReachableHelper:
  """Helper class to make shared internal state sharing easier.

  Used as the callable passed to L{utils.Retry}: each call runs the action
  callback on the currently reachable nodes and raises C{RetryAgain} while
  some nodes are still down; C{Wait} is used as the retry wait function and
  probes the remaining down nodes via TCP ping.

  @ivar success: Indicates if all action_cb calls were successful

  """
  def __init__(self, node_list, action_cb, node2ip, port, feedback_fn,
               _ping_fn=netutils.TcpPing, _sleep_fn=time.sleep):
    """Init the object.

    @param node_list: The list of nodes to be reachable
    @param action_cb: Callback called when a new host is reachable
    @type node2ip: dict
    @param node2ip: Node to ip mapping
    @param port: The port to use for the TCP ping
    @param feedback_fn: The function used for feedback
    @param _ping_fn: Function to check reachabilty (for unittest use only)
    @param _sleep_fn: Function to sleep (for unittest use only)

    """
    # nodes still unreachable; members migrate to self.up as pings succeed
    self.down = set(node_list)
    self.up = set()
    self.node2ip = node2ip
    self.success = True
    self.action_cb = action_cb
    self.port = port
    self.feedback_fn = feedback_fn
    self._ping_fn = _ping_fn
    self._sleep_fn = _sleep_fn

  def __call__(self):
    """When called we run action_cb.

    Invokes the action callback with the set of nodes currently up; a
    single callback failure makes the overall result False, but retrying
    continues until all nodes are up.

    @raises utils.RetryAgain: When there are still down nodes

    """
    if not self.action_cb(self.up):
      self.success = False

    if self.down:
      raise utils.RetryAgain()
    else:
      return self.success

  def Wait(self, secs):
    """Checks if a host is up or waits remaining seconds.

    Pings every node still marked down; as soon as one answers it is moved
    to the "up" set and we return immediately (without sleeping) so the
    action callback gets a chance to run. Only if no node answered do we
    sleep for whatever remains of the requested interval.

    @param secs: The secs remaining

    """
    start = time.time()
    for node in self.down:
      if self._ping_fn(self.node2ip[node], self.port, timeout=_EPO_PING_TIMEOUT,
                       live_port_needed=True):
        self.feedback_fn("Node %s became available" % node)
        self.up.add(node)
        self.down -= self.up
        # If we have a node available there is the possibility to run the
        # action callback successfully, therefore we don't wait and return
        return

    # sleep only for the time not already spent pinging
    self._sleep_fn(max(0.0, start + secs - time.time()))
def _RunWhenNodesReachable(node_list, action_cb, interval):
  """Run action_cb when nodes become reachable.

  @param node_list: The list of nodes to be reachable
  @param action_cb: Callback called when a new host is reachable
  @param interval: The earliest time to retry

  """
  cluster_info = GetClient().QueryClusterInfo()

  # resolve node names with the cluster's primary IP family
  if cluster_info["primary_ip_version"] == constants.IP4_VERSION:
    family = netutils.IPAddress.family
  else:
    family = netutils.IP6Address.family

  node2ip = {}
  for node in node_list:
    node2ip[node] = netutils.GetHostname(node, family=family).ip

  port = netutils.GetDaemonPort(constants.NODED)
  helper = _RunWhenNodesReachableHelper(node_list, action_cb, node2ip, port,
                                        ToStdout)

  try:
    return utils.Retry(helper, interval, _EPO_REACHABLE_TIMEOUT,
                       wait_fn=helper.Wait)
  except utils.RetryTimeout:
    ToStderr("Time exceeded while waiting for nodes to become reachable"
             " again:\n  - %s", "  - ".join(helper.down))
    return False
def _MaybeInstanceStartup(opts, inst_map, nodes_online,
                          _instance_start_fn=_InstanceStart):
  """Start the instances conditional based on node_states.

  @param opts: The command line options selected by the user
  @param inst_map: A dict of inst -> nodes mapping
  @param nodes_online: A list of nodes online
  @param _instance_start_fn: Callback to start instances (unittest use only)
  @return: Success of the operation on all instances

  """
  # An instance can be started once every node it lives on is back online
  start_inst_list = [inst for (inst, nodes) in inst_map.items()
                     if not (nodes - nodes_online)]

  # Remove them from the map so they are not considered again
  for inst in start_inst_list:
    del inst_map[inst]

  if not start_inst_list:
    return True

  return _instance_start_fn(opts, start_inst_list, True)
def _EpoOn(opts, full_node_list, node_list, inst_map):
  """Does the actual power on.

  @param opts: The command line options selected by the user
  @param full_node_list: All nodes to operate on (includes nodes not supporting
                         OOB)
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # Power the nodes ON: _OobPower maps power=True to OOB_POWER_ON.  The
  # previous code passed False here, which requested OOB_POWER_OFF in the
  # power-on path.
  if node_list and not _OobPower(opts, node_list, True):
    ToStderr("Not all nodes seem to get back up, investigate and start"
             " manually if needed")

  # Wait for the nodes to be back up
  action_cb = compat.partial(_MaybeInstanceStartup, opts, dict(inst_map))

  ToStdout("Waiting until all nodes are available again")
  if not _RunWhenNodesReachable(full_node_list, action_cb, _EPO_PING_INTERVAL):
    ToStderr("Please investigate and start stopped instances manually")
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS
def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  # First stop all instances, without remembering the stopped state
  if not _InstanceStart(opts, inst_map.keys(), False, no_remember=True):
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  # With no OOB-capable nodes there is nothing left to power down
  if not node_list:
    return constants.EXIT_SUCCESS

  if not _OobPower(opts, node_list, False):
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS
def Epo(opts, args, cl=None, _on_fn=_EpoOn, _off_fn=_EpoOff,
        _confirm_fn=ConfirmOperation,
        _stdout_fn=ToStdout, _stderr_fn=ToStderr):
  """EPO operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  # --groups and --all are mutually exclusive; --all also takes no arguments
  if opts.groups and opts.show_all:
    _stderr_fn("Only one of --groups or --all are allowed")
    return constants.EXIT_FAILURE
  elif args and opts.show_all:
    _stderr_fn("Arguments in combination with --all are not allowed")
    return constants.EXIT_FAILURE

  if cl is None:
    cl = GetClient()

  # With --groups the arguments name node groups; expand them to their nodes
  if opts.groups:
    node_query_list = \
      itertools.chain(*cl.QueryGroups(args, ["node_list"], False))
  else:
    node_query_list = args

  result = cl.QueryNodes(node_query_list, ["name", "master", "pinst_list",
                                           "sinst_list", "powered", "offline"],
                         False)

  all_nodes = map(compat.fst, result)
  node_list = []
  inst_map = {}
  for (node, master, pinsts, sinsts, powered, offline) in result:
    if not offline:
      # Build inst -> nodes map; the master node itself is never added to
      # an instance's node set, so instances (also) living on the master
      # don't wait for the master's OOB power cycle
      for inst in (pinsts + sinsts):
        if inst in inst_map:
          if not master:
            inst_map[inst].add(node)
        elif master:
          inst_map[inst] = set()
        else:
          inst_map[inst] = set([node])

    if master and opts.on:
      # We ignore the master for turning on the machines, in fact we are
      # already operating on the master at this point :)
      continue
    elif master and not opts.show_all:
      _stderr_fn("%s is the master node, please do a master-failover to another"
                 " node not affected by the EPO or use --all if you intend to"
                 " shutdown the whole cluster", node)
      return constants.EXIT_FAILURE
    elif powered is None:
      # node has no OOB support; it can only be handled manually
      _stdout_fn("Node %s does not support out-of-band handling, it can not be"
                 " handled in a fully automated manner", node)
    elif powered == opts.on:
      _stdout_fn("Node %s is already in desired power state, skipping", node)
    elif not offline or (offline and powered):
      node_list.append(node)

  if not (opts.force or _confirm_fn(all_nodes, "nodes", "epo")):
    return constants.EXIT_FAILURE

  if opts.on:
    return _on_fn(opts, all_nodes, node_list, inst_map)
  else:
    return _off_fn(opts, node_list, inst_map)
def _GetCreateCommand(info):
  """Builds the "gnt-cluster init" command line for the given cluster info."""
  out = StringIO()
  out.write("gnt-cluster init")
  # PrintIPolicyCommand appends the ipolicy-related options to the buffer
  PrintIPolicyCommand(out, info["ipolicy"], False)
  out.write(" " + info["name"])
  return out.getvalue()
def ShowCreateCommand(opts, args):
  """Shows the command that can be used to re-create the cluster.

  Currently it works only for ipolicy specs.

  """
  info = GetClient(query=True).QueryClusterInfo()
  ToStdout(_GetCreateCommand(info))
#: dictionary mapping each gnt-cluster sub-command name to its definition:
#: (handler function, positional argument spec, option list, usage synopsis,
#: description)
commands = {
  "init": (
    InitCluster, [ArgHost(min=1, max=1)],
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, MASTER_NETMASK_OPT,
     NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, NOMODIFY_ETCHOSTS_OPT,
     NOMODIFY_SSH_SETUP_OPT, SECONDARY_IP_OPT, VG_NAME_OPT,
     MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, DRBD_HELPER_OPT, NODRBD_STORAGE_OPT,
     DEFAULT_IALLOCATOR_OPT, PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT,
     DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT, ENABLED_DISK_TEMPLATES_OPT,
     IPOLICY_STD_SPECS_OPT] + INSTANCE_POLICY_OPTS + SPLIT_ISPECS_OPTS,
    "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
  "destroy": (
    DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
    "", "Destroy cluster"),
  "rename": (
    RenameCluster, [ArgHost(min=1, max=1)],
    [FORCE_OPT, DRY_RUN_OPT],
    "<new_name>",
    "Renames the cluster"),
  "redist-conf": (
    RedistributeConfig, ARGS_NONE, SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT],
    "", "Forces a push of the configuration file and ssconf files"
    " to the nodes in the cluster"),
  "verify": (
    VerifyCluster, ARGS_NONE,
    [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT, IGNORE_ERRORS_OPT],
    "", "Does a check on the cluster configuration"),
  "verify-disks": (
    VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
    "", "Does a check on the cluster disk status"),
  "repair-disk-sizes": (
    RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
    "[instance...]", "Updates mismatches in recorded disk sizes"),
  "master-failover": (
    MasterFailover, ARGS_NONE, [NOVOTING_OPT, FORCE_FAILOVER],
    "", "Makes the current node the master"),
  "master-ping": (
    MasterPing, ARGS_NONE, [],
    "", "Checks if the master is alive"),
  "version": (
    ShowClusterVersion, ARGS_NONE, [],
    "", "Shows the cluster version"),
  "getmaster": (
    ShowClusterMaster, ARGS_NONE, [],
    "", "Shows the cluster master"),
  "copyfile": (
    ClusterCopyFile, [ArgFile(min=1, max=1)],
    [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
    "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
  "command": (
    RunClusterCommand, [ArgCommand(min=1)],
    [NODE_LIST_OPT, NODEGROUP_OPT, SHOW_MACHINE_OPT, FAILURE_ONLY_OPT],
    "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
  "info": (
    ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
    "[--roman]", "Show cluster configuration"),
  "list-tags": (
    ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
  "add-tags": (
    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "tag...", "Add tags to the cluster"),
  "remove-tags": (
    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "tag...", "Remove tags from the cluster"),
  "search-tags": (
    SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
    "Searches the tags on all objects on"
    " the cluster for a given pattern (regex)"),
  "queue": (
    QueueOps,
    [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
    [], "drain|undrain|info", "Change queue properties"),
  "watcher": (
    WatcherOps,
    [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
     ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
    [],
    "{pause <timespec>|continue|info}", "Change watcher properties"),
  "modify": (
    SetClusterParams, ARGS_NONE,
    [FORCE_OPT,
     BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
     MASTER_NETMASK_OPT, NIC_PARAMS_OPT, NOLVM_STORAGE_OPT, VG_NAME_OPT,
     MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT,
     DRBD_HELPER_OPT, NODRBD_STORAGE_OPT, DEFAULT_IALLOCATOR_OPT,
     RESERVED_LVS_OPT, DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT,
     NODE_PARAMS_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT,
     DISK_STATE_OPT] + SUBMIT_OPTS +
     [ENABLED_DISK_TEMPLATES_OPT, IPOLICY_STD_SPECS_OPT, MODIFY_ETCHOSTS_OPT] +
     INSTANCE_POLICY_OPTS,
    "[opts...]",
    "Alters the parameters of the cluster"),
  "renew-crypto": (
    RenewCrypto, ARGS_NONE,
    [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
     NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT,
     NEW_SPICE_CERT_OPT, SPICE_CERT_OPT, SPICE_CACERT_OPT],
    "[opts...]",
    "Renews cluster certificates, keys and secrets"),
  "epo": (
    Epo, [ArgUnknown()],
    [FORCE_OPT, ON_OPT, GROUPS_OPT, ALL_OPT, OOB_TIMEOUT_OPT,
     SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
    "[opts...] [args]",
    "Performs an emergency power-off on given args"),
  "activate-master-ip": (
    ActivateMasterIp, ARGS_NONE, [], "", "Activates the master IP"),
  "deactivate-master-ip": (
    DeactivateMasterIp, ARGS_NONE, [CONFIRM_OPT], "",
    "Deactivates the master IP"),
  "show-ispecs-cmd": (
    ShowCreateCommand, ARGS_NONE, [], "",
    "Show the command line to re-create the cluster"),
  }
#: dictionary with aliases for commands (alias name -> canonical command name)
aliases = {
  "masterfailover": "master-failover",
  "show": "info",
}
def Main():
  """Entry point for the gnt-cluster script."""
  return GenericMain(commands, aliases=aliases,
                     override={"tag_type": constants.TAG_CLUSTER})