root / lib / client / gnt_cluster.py @ 52261ad2

#
#

# Copyright (C) 2006, 2007, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Cluster related commands"""

# pylint: disable=W0401,W0613,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0613: Unused argument, since all functions follow the same API
# W0614: Unused import %s from wildcard import (since we need cli)
# C0103: Invalid name gnt-cluster

from cStringIO import StringIO
import os
import time
import OpenSSL
import itertools

from ganeti.cli import *
from ganeti import opcodes
from ganeti import constants
from ganeti import errors
from ganeti import utils
from ganeti import bootstrap
from ganeti import ssh
from ganeti import objects
from ganeti import uidpool
from ganeti import compat
from ganeti import netutils
from ganeti import ssconf
from ganeti import pathutils
from ganeti import serializer
from ganeti import qlang


ON_OPT = cli_option("--on", default=False,
                    action="store_true", dest="on",
                    help="Recover from an EPO")

GROUPS_OPT = cli_option("--groups", default=False,
                        action="store_true", dest="groups",
                        help="Arguments are node groups instead of nodes")

FORCE_FAILOVER = cli_option("--yes-do-it", dest="yes_do_it",
                            help="Override interactive check for --no-voting",
                            default=False, action="store_true")

FORCE_DISTRIBUTION = cli_option("--yes-do-it", dest="yes_do_it",
                                help="Unconditionally distribute the"
                                " configuration, even if the queue"
                                " is drained",
                                default=False, action="store_true")

TO_OPT = cli_option("--to", default=None, type="string",
                    help="The Ganeti version to upgrade to")

RESUME_OPT = cli_option("--resume", default=False, action="store_true",
                        help="Resume any pending Ganeti upgrades")

_EPO_PING_INTERVAL = 30 # 30 seconds between pings
_EPO_PING_TIMEOUT = 1 # 1 second
_EPO_REACHABLE_TIMEOUT = 15 * 60 # 15 minutes


def _InitEnabledDiskTemplates(opts):
  """Initialize the list of enabled disk templates.

  """
  if opts.enabled_disk_templates:
    return opts.enabled_disk_templates.split(",")
  else:
    return constants.DEFAULT_ENABLED_DISK_TEMPLATES


def _InitVgName(opts, enabled_disk_templates):
  """Initialize the volume group name.

  @type enabled_disk_templates: list of strings
  @param enabled_disk_templates: cluster-wide enabled disk templates

  """
  vg_name = None
  if opts.vg_name is not None:
    vg_name = opts.vg_name
    if vg_name:
      if not utils.IsLvmEnabled(enabled_disk_templates):
        ToStdout("You specified a volume group with --vg-name, but you did not"
                 " enable any disk template that uses lvm.")
    elif utils.IsLvmEnabled(enabled_disk_templates):
      raise errors.OpPrereqError(
          "LVM disk templates are enabled, but vg name not set.")
  elif utils.IsLvmEnabled(enabled_disk_templates):
    vg_name = constants.DEFAULT_VG
  return vg_name
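# Summary of the decision table above (editor's note, derived from the code,
# not part of the original source):
#   --vg-name=<name>, lvm templates enabled      -> use the given name
#   --vg-name=<name>, no lvm template enabled    -> warn, keep the name
#   --vg-name= (empty), lvm templates enabled    -> OpPrereqError
#   no --vg-name, lvm templates enabled          -> constants.DEFAULT_VG
#   no --vg-name, no lvm template enabled        -> None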


def _InitDrbdHelper(opts, enabled_disk_templates):
  """Initialize the DRBD usermode helper.

  """
  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates

  if not drbd_enabled and opts.drbd_helper is not None:
    ToStdout("Note: You specified a DRBD usermode helper, while DRBD storage"
             " is not enabled.")

  if drbd_enabled:
    if opts.drbd_helper is None:
      return constants.DEFAULT_DRBD_HELPER
    if opts.drbd_helper == '':
      raise errors.OpPrereqError(
          "Unsetting the drbd usermode helper while enabling DRBD is not"
          " allowed.")

  return opts.drbd_helper


@UsesRPC
def InitCluster(opts, args):
  """Initialize the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the desired
      cluster name
  @rtype: int
  @return: the desired exit code

  """
  enabled_disk_templates = _InitEnabledDiskTemplates(opts)

  try:
    vg_name = _InitVgName(opts, enabled_disk_templates)
    drbd_helper = _InitDrbdHelper(opts, enabled_disk_templates)
  except errors.OpPrereqError, e:
    ToStderr(str(e))
    return 1

  master_netdev = opts.master_netdev
  if master_netdev is None:
    nic_mode = opts.nicparams.get(constants.NIC_MODE, None)
    if not nic_mode:
      # default case, use bridging
      master_netdev = constants.DEFAULT_BRIDGE
    elif nic_mode == constants.NIC_MODE_OVS:
      # default ovs is different from default bridge
      master_netdev = constants.DEFAULT_OVS
      opts.nicparams[constants.NIC_LINK] = constants.DEFAULT_OVS

  hvlist = opts.enabled_hypervisors
  if hvlist is None:
    hvlist = constants.DEFAULT_ENABLED_HYPERVISOR
  hvlist = hvlist.split(",")

  hvparams = dict(opts.hvparams)
  beparams = opts.beparams
  nicparams = opts.nicparams

  diskparams = dict(opts.diskparams)

  # check the disk template types here, as we cannot rely on the type check done
  # by the opcode parameter types
  diskparams_keys = set(diskparams.keys())
  if not (diskparams_keys <= constants.DISK_TEMPLATES):
    unknown = utils.NiceSort(diskparams_keys - constants.DISK_TEMPLATES)
    ToStderr("Disk templates unknown: %s" % utils.CommaJoin(unknown))
    return 1

  # prepare beparams dict
  beparams = objects.FillDict(constants.BEC_DEFAULTS, beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)

  # prepare nicparams dict
  nicparams = objects.FillDict(constants.NICC_DEFAULTS, nicparams)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  # prepare ndparams dict
  if opts.ndparams is None:
    ndparams = dict(constants.NDC_DEFAULTS)
  else:
    ndparams = objects.FillDict(constants.NDC_DEFAULTS, opts.ndparams)
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  # prepare hvparams dict
  for hv in constants.HYPER_TYPES:
    if hv not in hvparams:
      hvparams[hv] = {}
    hvparams[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], hvparams[hv])
    utils.ForceDictType(hvparams[hv], constants.HVS_PARAMETER_TYPES)

  # prepare diskparams dict
  for templ in constants.DISK_TEMPLATES:
    if templ not in diskparams:
      diskparams[templ] = {}
    diskparams[templ] = objects.FillDict(constants.DISK_DT_DEFAULTS[templ],
                                         diskparams[templ])
    utils.ForceDictType(diskparams[templ], constants.DISK_DT_TYPES)

  # prepare ipolicy dict
  ipolicy = CreateIPolicyFromOpts(
    ispecs_mem_size=opts.ispecs_mem_size,
    ispecs_cpu_count=opts.ispecs_cpu_count,
    ispecs_disk_count=opts.ispecs_disk_count,
    ispecs_disk_size=opts.ispecs_disk_size,
    ispecs_nic_count=opts.ispecs_nic_count,
    minmax_ispecs=opts.ipolicy_bounds_specs,
    std_ispecs=opts.ipolicy_std_specs,
    ipolicy_disk_templates=opts.ipolicy_disk_templates,
    ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
    fill_all=True)

  if opts.candidate_pool_size is None:
    opts.candidate_pool_size = constants.MASTER_POOL_SIZE_DEFAULT

  if opts.mac_prefix is None:
    opts.mac_prefix = constants.DEFAULT_MAC_PREFIX

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  if opts.prealloc_wipe_disks is None:
    opts.prealloc_wipe_disks = False

  external_ip_setup_script = opts.use_external_mip_script
  if external_ip_setup_script is None:
    external_ip_setup_script = False

  try:
    primary_ip_version = int(opts.primary_ip_version)
  except (ValueError, TypeError), err:
    ToStderr("Invalid primary ip version value: %s" % str(err))
    return 1

  master_netmask = opts.master_netmask
  try:
    if master_netmask is not None:
      master_netmask = int(master_netmask)
  except (ValueError, TypeError), err:
    ToStderr("Invalid master netmask value: %s" % str(err))
    return 1

  if opts.disk_state:
    disk_state = utils.FlatToDict(opts.disk_state)
  else:
    disk_state = {}

  hv_state = dict(opts.hv_state)

  default_ialloc_params = opts.default_iallocator_params
  bootstrap.InitCluster(cluster_name=args[0],
                        secondary_ip=opts.secondary_ip,
                        vg_name=vg_name,
                        mac_prefix=opts.mac_prefix,
                        master_netmask=master_netmask,
                        master_netdev=master_netdev,
                        file_storage_dir=opts.file_storage_dir,
                        shared_file_storage_dir=opts.shared_file_storage_dir,
                        gluster_storage_dir=opts.gluster_storage_dir,
                        enabled_hypervisors=hvlist,
                        hvparams=hvparams,
                        beparams=beparams,
                        nicparams=nicparams,
                        ndparams=ndparams,
                        diskparams=diskparams,
                        ipolicy=ipolicy,
                        candidate_pool_size=opts.candidate_pool_size,
                        modify_etc_hosts=opts.modify_etc_hosts,
                        modify_ssh_setup=opts.modify_ssh_setup,
                        maintain_node_health=opts.maintain_node_health,
                        drbd_helper=drbd_helper,
                        uid_pool=uid_pool,
                        default_iallocator=opts.default_iallocator,
                        default_iallocator_params=default_ialloc_params,
                        primary_ip_version=primary_ip_version,
                        prealloc_wipe_disks=opts.prealloc_wipe_disks,
                        use_external_mip_script=external_ip_setup_script,
                        hv_state=hv_state,
                        disk_state=disk_state,
                        enabled_disk_templates=enabled_disk_templates,
                        )
  op = opcodes.OpClusterPostInit()
  SubmitOpCode(op, opts=opts)
  return 0
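# Illustrative invocation (editor's example; the option parser binding these
# names is not part of this excerpt, so treat the flags as assumptions):
#   gnt-cluster init --enabled-hypervisors=kvm \
#     --vg-name=xenvg --master-netdev=eth0 cluster.example.com
# args[0] is the cluster name; everything else arrives through "opts".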


@UsesRPC
def DestroyCluster(opts, args):
  """Destroy the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if not opts.yes_do_it:
    ToStderr("Destroying a cluster is irreversible. If you really want to"
             " destroy this cluster, supply the --yes-do-it option.")
    return 1

  op = opcodes.OpClusterDestroy()
  master_uuid = SubmitOpCode(op, opts=opts)
  # if we reached this, the opcode didn't fail; we can proceed to
  # shutdown all the daemons
  bootstrap.FinalizeClusterDestroy(master_uuid)
  return 0


def RenameCluster(opts, args):
  """Rename the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new cluster name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])

  new_name = args[0]
  if not opts.force:
    usertext = ("This will rename the cluster from '%s' to '%s'. If you are"
                " connected over the network to the cluster name, the"
                " operation is very dangerous as the IP address will be"
                " removed from the node and the change may not go through."
                " Continue?") % (cluster_name, new_name)
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterRename(name=new_name)
  result = SubmitOpCode(op, opts=opts, cl=cl)

  if result:
    ToStdout("Cluster renamed from '%s' to '%s'", cluster_name, result)

  return 0


def ActivateMasterIp(opts, args):
  """Activates the master IP.

  """
  op = opcodes.OpClusterActivateMasterIp()
  SubmitOpCode(op)
  return 0


def DeactivateMasterIp(opts, args):
  """Deactivates the master IP.

  """
  if not opts.confirm:
    usertext = ("This will disable the master IP. All the open connections to"
                " the master IP will be closed. To reach the master you will"
                " need to use its node IP."
                " Continue?")
    if not AskUser(usertext):
      return 1

  op = opcodes.OpClusterDeactivateMasterIp()
  SubmitOpCode(op)
  return 0


def RedistributeConfig(opts, args):
  """Forces push of the cluster configuration.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: empty list
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRedistConf()
  if opts.yes_do_it:
    SubmitOpCodeToDrainedQueue(op)
  else:
    SubmitOrSend(op, opts)
  return 0


def ShowClusterVersion(opts, args):
  """Write version of ganeti software to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient(query=True)
  result = cl.QueryClusterInfo()
  ToStdout("Software version: %s", result["software_version"])
  ToStdout("Internode protocol: %s", result["protocol_version"])
  ToStdout("Configuration format: %s", result["config_version"])
  ToStdout("OS api version: %s", result["os_api_version"])
  ToStdout("Export interface: %s", result["export_version"])
  ToStdout("VCS version: %s", result["vcs_version"])
  return 0


def ShowClusterMaster(opts, args):
  """Write name of master node to the standard output.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  master = bootstrap.GetMaster()
  ToStdout(master)
  return 0


def _FormatGroupedParams(paramsdict, roman=False):
  """Format Grouped parameters (be, nic, disk) by group.

  @type paramsdict: dict of dicts
  @param paramsdict: {group: {param: value, ...}, ...}
  @rtype: dict of dicts
  @return: copy of the input dictionaries with strings as values

  """
  ret = {}
  for (item, val) in paramsdict.items():
    if isinstance(val, dict):
      ret[item] = _FormatGroupedParams(val, roman=roman)
    elif roman and isinstance(val, int):
      ret[item] = compat.TryToRoman(val)
    else:
      ret[item] = str(val)
  return ret
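# Illustrative example (editor's note, not part of the original source):
#   _FormatGroupedParams({"default": {"memory": 128, "vcpus": 1}})
#   -> {"default": {"memory": "128", "vcpus": "1"}}
# With roman=True, integer values go through compat.TryToRoman instead,
# e.g. 128 -> "CXXVIII" (when the roman library is available).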


def ShowClusterConfig(opts, args):
  """Shows cluster information.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient(query=True)
  result = cl.QueryClusterInfo()

  if result["tags"]:
    tags = utils.CommaJoin(utils.NiceSort(result["tags"]))
  else:
    tags = "(none)"
  if result["reserved_lvs"]:
    reserved_lvs = utils.CommaJoin(result["reserved_lvs"])
  else:
    reserved_lvs = "(none)"

  enabled_hv = result["enabled_hypervisors"]
  hvparams = dict((k, v) for k, v in result["hvparams"].iteritems()
                  if k in enabled_hv)

  info = [
    ("Cluster name", result["name"]),
    ("Cluster UUID", result["uuid"]),

    ("Creation time", utils.FormatTime(result["ctime"])),
    ("Modification time", utils.FormatTime(result["mtime"])),

    ("Master node", result["master"]),

    ("Architecture (this node)",
     "%s (%s)" % (result["architecture"][0], result["architecture"][1])),

    ("Tags", tags),

    ("Default hypervisor", result["default_hypervisor"]),
    ("Enabled hypervisors", utils.CommaJoin(enabled_hv)),

    ("Hypervisor parameters", _FormatGroupedParams(hvparams)),

    ("OS-specific hypervisor parameters",
     _FormatGroupedParams(result["os_hvp"])),

    ("OS parameters", _FormatGroupedParams(result["osparams"])),

    ("Hidden OSes", utils.CommaJoin(result["hidden_os"])),
    ("Blacklisted OSes", utils.CommaJoin(result["blacklisted_os"])),

    ("Cluster parameters", [
      ("candidate pool size",
       compat.TryToRoman(result["candidate_pool_size"],
                         convert=opts.roman_integers)),
      ("master netdev", result["master_netdev"]),
      ("master netmask", result["master_netmask"]),
      ("use external master IP address setup script",
       result["use_external_mip_script"]),
      ("lvm volume group", result["volume_group_name"]),
      ("lvm reserved volumes", reserved_lvs),
      ("drbd usermode helper", result["drbd_usermode_helper"]),
      ("file storage path", result["file_storage_dir"]),
      ("shared file storage path", result["shared_file_storage_dir"]),
      ("gluster storage path", result["gluster_storage_dir"]),
      ("maintenance of node health", result["maintain_node_health"]),
      ("uid pool", uidpool.FormatUidPool(result["uid_pool"])),
      ("default instance allocator", result["default_iallocator"]),
      ("default instance allocator parameters",
       result["default_iallocator_params"]),
      ("primary ip version", result["primary_ip_version"]),
      ("preallocation wipe disks", result["prealloc_wipe_disks"]),
      ("OS search path", utils.CommaJoin(pathutils.OS_SEARCH_PATH)),
      ("ExtStorage Providers search path",
       utils.CommaJoin(pathutils.ES_SEARCH_PATH)),
      ("enabled disk templates",
       utils.CommaJoin(result["enabled_disk_templates"])),
      ]),

    ("Default node parameters",
     _FormatGroupedParams(result["ndparams"], roman=opts.roman_integers)),

    ("Default instance parameters",
     _FormatGroupedParams(result["beparams"], roman=opts.roman_integers)),

    ("Default nic parameters",
     _FormatGroupedParams(result["nicparams"], roman=opts.roman_integers)),

    ("Default disk parameters",
     _FormatGroupedParams(result["diskparams"], roman=opts.roman_integers)),

    ("Instance policy - limits for instances",
     FormatPolicyInfo(result["ipolicy"], None, True)),
    ]

  PrintGenericInfo(info)
  return 0


def ClusterCopyFile(opts, args):
  """Copy a file from master to some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the path of
      the file to be copied
  @rtype: int
  @return: the desired exit code

  """
  filename = args[0]
  if not os.path.exists(filename):
    raise errors.OpPrereqError("No such filename '%s'" % filename,
                               errors.ECODE_INVAL)

  cl = GetClient()
  qcl = GetClient(query=True)
  try:
    cluster_name = cl.QueryConfigValues(["cluster_name"])[0]

    results = GetOnlineNodes(nodes=opts.nodes, cl=qcl, filter_master=True,
                             secondary_ips=opts.use_replication_network,
                             nodegroup=opts.nodegroup)
    ports = GetNodesSshPorts(opts.nodes, qcl)
  finally:
    cl.Close()
    qcl.Close()

  srun = ssh.SshRunner(cluster_name)
  for (node, port) in zip(results, ports):
    if not srun.CopyFileToNode(node, port, filename):
      ToStderr("Copy of file %s to node %s:%d failed", filename, node, port)

  return 0
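# Typical use (editor's note; the command table binding this function is not
# part of this excerpt, but in Ganeti it is exposed as "gnt-cluster
# copyfile"), e.g.:
#   gnt-cluster copyfile -n node1.example.com /etc/hosts
# Without a node selection the file is pushed to all online nodes except the
# master itself (filter_master=True above).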


def RunClusterCommand(opts, args):
  """Run a command on some nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain the command to be run and its arguments
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  qcl = GetClient(query=True)

  command = " ".join(args)

  nodes = GetOnlineNodes(nodes=opts.nodes, cl=qcl, nodegroup=opts.nodegroup)
  ports = GetNodesSshPorts(nodes, qcl)

  cluster_name, master_node = cl.QueryConfigValues(["cluster_name",
                                                    "master_node"])

  srun = ssh.SshRunner(cluster_name=cluster_name)

  # Make sure master node is at list end
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  for (name, port) in zip(nodes, ports):
    result = srun.Run(name, constants.SSH_LOGIN_USER, command, port=port)

    if opts.failure_only and result.exit_code == constants.EXIT_SUCCESS:
      # Do not output anything for successful commands
      continue

    ToStdout("------------------------------------------------")
    if opts.show_machine_names:
      for line in result.output.splitlines():
        ToStdout("%s: %s", name, line)
    else:
      ToStdout("node: %s", name)
      ToStdout("%s", result.output)
    ToStdout("return code = %s", result.exit_code)

  return 0
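# Editor's note: the master is deliberately moved to the end of the node list
# above, presumably so that a disruptive command (one that restarts daemons
# or shuts nodes down, say) hits the node driving the SSH connections last.
# Typical invocation (command name assumed from the gnt-cluster(8) man page
# referenced elsewhere in this file):
#   gnt-cluster command -n node1.example.com "uptime"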


def VerifyCluster(opts, args):
  """Verify integrity of cluster, performing various tests on nodes.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  skip_checks = []

  if opts.skip_nplusone_mem:
    skip_checks.append(constants.VERIFY_NPLUSONE_MEM)

  cl = GetClient()

  op = opcodes.OpClusterVerify(verbose=opts.verbose,
                               error_codes=opts.error_codes,
                               debug_simulate_errors=opts.simulate_errors,
                               skip_checks=skip_checks,
                               ignore_errors=opts.ignore_errors,
                               group_name=opts.nodegroup)
  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  results = jex.GetResults()

  (bad_jobs, bad_results) = \
    map(len,
        # Convert iterators to lists
        map(list,
            # Count errors
            map(compat.partial(itertools.ifilterfalse, bool),
                # Convert result to booleans in a tuple
                zip(*((job_success, len(op_results) == 1 and op_results[0])
                      for (job_success, op_results) in results)))))
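  # Editor's gloss on the expression above: each entry in "results" is a
  # (job_success, op_results) pair.  zip(*...) turns those pairs into two
  # parallel sequences, ifilterfalse(bool, ...) keeps only the falsy entries
  # in each, and the final map(len, ...) counts them, yielding the number of
  # failed jobs and the number of jobs whose single opcode result was falsy.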

  if bad_jobs == 0 and bad_results == 0:
    rcode = constants.EXIT_SUCCESS
  else:
    rcode = constants.EXIT_FAILURE
    if bad_jobs > 0:
      ToStdout("%s job(s) failed while verifying the cluster.", bad_jobs)

  return rcode


def VerifyDisks(opts, args):
  """Verify integrity of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()

  op = opcodes.OpClusterVerifyDisks()

  result = SubmitOpCode(op, cl=cl, opts=opts)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  retcode = constants.EXIT_SUCCESS

  for (status, result) in jex.GetResults():
    if not status:
      ToStdout("Job failed: %s", result)
      continue

    ((bad_nodes, instances, missing), ) = result

    for node, text in bad_nodes.items():
      ToStdout("Error gathering data on node %s: %s",
               node, utils.SafeEncode(text[-400:]))
      retcode = constants.EXIT_FAILURE
      ToStdout("You need to fix these nodes first before fixing instances")

    for iname in instances:
      if iname in missing:
        continue
      op = opcodes.OpInstanceActivateDisks(instance_name=iname)
      try:
        ToStdout("Activating disks for instance '%s'", iname)
        SubmitOpCode(op, opts=opts, cl=cl)
      except errors.GenericError, err:
        nret, msg = FormatError(err)
        retcode |= nret
        ToStderr("Error activating disks for instance %s: %s", iname, msg)

    if missing:
      for iname, ival in missing.iteritems():
        all_missing = compat.all(x[0] in bad_nodes for x in ival)
        if all_missing:
          ToStdout("Instance %s cannot be verified as it lives on"
                   " broken nodes", iname)
        else:
          ToStdout("Instance %s has missing logical volumes:", iname)
          ival.sort()
          for node, vol in ival:
            if node in bad_nodes:
              ToStdout("\tbroken node %s /dev/%s", node, vol)
            else:
              ToStdout("\t%s /dev/%s", node, vol)

      ToStdout("You need to replace or recreate disks for all the above"
               " instances if this message persists after fixing broken nodes.")
      retcode = constants.EXIT_FAILURE
    elif not instances:
      ToStdout("No disks need to be activated.")

  return retcode


def RepairDiskSizes(opts, args):
  """Verify sizes of cluster disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: optional list of instances to restrict check to
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpClusterRepairDiskSizes(instances=args)
  SubmitOpCode(op, opts=opts)


@UsesRPC
def MasterFailover(opts, args):
  """Failover the master node.

  This command, when run on a non-master node, will cause the current
  master to cease being master, and the non-master to become the new
  master.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if opts.no_voting and not opts.yes_do_it:
    usertext = ("This will perform the failover even if most other nodes"
                " are down, or if this node is outdated. This is dangerous"
                " as it can lead to a non-consistent cluster. Check the"
                " gnt-cluster(8) man page before proceeding. Continue?")
    if not AskUser(usertext):
      return 1

  return bootstrap.MasterFailover(no_voting=opts.no_voting)


def MasterPing(opts, args):
  """Checks if the master is alive.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  try:
    cl = GetClient()
    cl.QueryClusterInfo()
    return 0
  except Exception: # pylint: disable=W0703
    return 1


def SearchTags(opts, args):
  """Searches the tags across the whole cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the tag pattern
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpTagsSearch(pattern=args[0])
  result = SubmitOpCode(op, opts=opts)
  if not result:
    return 1
  result = list(result)
  result.sort()
  for path, tag in result:
    ToStdout("%s %s", path, tag)


def _ReadAndVerifyCert(cert_filename, verify_private_key=False):
  """Reads and verifies an X509 certificate.

  @type cert_filename: string
  @param cert_filename: the path of the file containing the certificate to
                        verify encoded in PEM format
  @type verify_private_key: bool
  @param verify_private_key: whether to verify the private key in addition to
                             the public certificate
  @rtype: string
  @return: a string containing the PEM-encoded certificate.

  """
  try:
    pem = utils.ReadFile(cert_filename)
  except IOError, err:
    raise errors.X509CertError(cert_filename,
                               "Unable to read certificate: %s" % str(err))

  try:
    OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
  except Exception, err:
    raise errors.X509CertError(cert_filename,
                               "Unable to load certificate: %s" % str(err))

  if verify_private_key:
    try:
      OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, pem)
    except Exception, err:
      raise errors.X509CertError(cert_filename,
                                 "Unable to load private key: %s" % str(err))

  return pem


def _RenewCrypto(new_cluster_cert, new_rapi_cert, # pylint: disable=R0911
                 rapi_cert_filename, new_spice_cert, spice_cert_filename,
                 spice_cacert_filename, new_confd_hmac_key, new_cds,
                 cds_filename, force, new_node_cert):
  """Renews cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type rapi_cert_filename: string
  @param rapi_cert_filename: Path to file containing new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type spice_cert_filename: string
  @param spice_cert_filename: Path to file containing new SPICE certificate
  @type spice_cacert_filename: string
  @param spice_cacert_filename: Path to file containing the certificate of the
                                CA that signed the SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type cds_filename: string
  @param cds_filename: Path to file containing new cluster domain secret
  @type force: bool
  @param force: Whether to ask user for confirmation
  @type new_node_cert: bool
  @param new_node_cert: Whether to generate new node certificates

  """
  if new_rapi_cert and rapi_cert_filename:
    ToStderr("Only one of the --new-rapi-certificate and --rapi-certificate"
             " options can be specified at the same time.")
    return 1

  if new_cds and cds_filename:
    ToStderr("Only one of the --new-cluster-domain-secret and"
             " --cluster-domain-secret options can be specified at"
             " the same time.")
    return 1

  if new_spice_cert and (spice_cert_filename or spice_cacert_filename):
    ToStderr("When using --new-spice-certificate, the --spice-certificate"
             " and --spice-ca-certificate must not be used.")
    return 1

  if bool(spice_cacert_filename) ^ bool(spice_cert_filename):
    ToStderr("Both --spice-certificate and --spice-ca-certificate must be"
             " specified.")
    return 1

  rapi_cert_pem, spice_cert_pem, spice_cacert_pem = (None, None, None)
  try:
    if rapi_cert_filename:
      rapi_cert_pem = _ReadAndVerifyCert(rapi_cert_filename, True)
    if spice_cert_filename:
      spice_cert_pem = _ReadAndVerifyCert(spice_cert_filename, True)
      spice_cacert_pem = _ReadAndVerifyCert(spice_cacert_filename)
  except errors.X509CertError, err:
    ToStderr("Unable to load X509 certificate from %s: %s", err[0], err[1])
    return 1

  if cds_filename:
    try:
      cds = utils.ReadFile(cds_filename)
    except Exception, err: # pylint: disable=W0703
      ToStderr("Can't load new cluster domain secret from %s: %s" %
               (cds_filename, str(err)))
      return 1
  else:
    cds = None

  if not force:
    usertext = ("This requires all daemons on all nodes to be restarted and"
                " may take some time. Continue?")
    if not AskUser(usertext):
      return 1

  def _RenewCryptoInner(ctx):
    ctx.feedback_fn("Updating certificates and keys")
    # Note: the node certificate will be generated in the LU
    bootstrap.GenerateClusterCrypto(new_cluster_cert,
                                    new_rapi_cert,
                                    new_spice_cert,
                                    new_confd_hmac_key,
                                    new_cds,
                                    rapi_cert_pem=rapi_cert_pem,
                                    spice_cert_pem=spice_cert_pem,
                                    spice_cacert_pem=spice_cacert_pem,
                                    cds=cds)

    files_to_copy = []

    if new_cluster_cert:
      files_to_copy.append(pathutils.NODED_CERT_FILE)

    if new_rapi_cert or rapi_cert_pem:
      files_to_copy.append(pathutils.RAPI_CERT_FILE)

    if new_spice_cert or spice_cert_pem:
      files_to_copy.append(pathutils.SPICE_CERT_FILE)
      files_to_copy.append(pathutils.SPICE_CACERT_FILE)

    if new_confd_hmac_key:
      files_to_copy.append(pathutils.CONFD_HMAC_KEY)

    if new_cds or cds:
      files_to_copy.append(pathutils.CLUSTER_DOMAIN_SECRET_FILE)

    if files_to_copy:
      for node_name in ctx.nonmaster_nodes:
        port = ctx.ssh_ports[node_name]
        ctx.feedback_fn("Copying %s to %s:%d" %
                        (", ".join(files_to_copy), node_name, port))
        for file_name in files_to_copy:
          ctx.ssh.CopyFileToNode(node_name, port, file_name)

  RunWhileClusterStopped(ToStdout, _RenewCryptoInner)

  ToStdout("All requested certificates and keys have been replaced."
           " Running \"gnt-cluster verify\" now is recommended.")

  if new_node_cert:
    cl = GetClient()
    renew_op = opcodes.OpClusterRenewCrypto()
    SubmitOpCode(renew_op, cl=cl)

  return 0
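# Editor's summary (not in the original source): _RenewCrypto validates the
# mutually exclusive options, reads any user-supplied PEM material, asks for
# confirmation, then regenerates the requested secrets inside
# RunWhileClusterStopped() so no daemon holds the old files, copying the
# results to all non-master nodes over SSH; node certificates themselves are
# renewed afterwards through OpClusterRenewCrypto.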


def RenewCrypto(opts, args):
  """Renews cluster certificates, keys and secrets.

  """
  return _RenewCrypto(opts.new_cluster_cert,
                      opts.new_rapi_cert,
                      opts.rapi_cert,
                      opts.new_spice_cert,
                      opts.spice_cert,
                      opts.spice_cacert,
                      opts.new_confd_hmac_key,
                      opts.new_cluster_domain_secret,
                      opts.cluster_domain_secret,
                      opts.force,
                      opts.new_node_cert)


def _GetEnabledDiskTemplates(opts):
  """Determine the list of enabled disk templates.

  """
  if opts.enabled_disk_templates:
    return opts.enabled_disk_templates.split(",")
  else:
    return None


def _GetVgName(opts, enabled_disk_templates):
  """Determine the volume group name.

  @type enabled_disk_templates: list of strings
  @param enabled_disk_templates: cluster-wide enabled disk-templates

  """
  # consistency between vg name and enabled disk templates
  vg_name = None
  if opts.vg_name is not None:
    vg_name = opts.vg_name
  if enabled_disk_templates:
    if vg_name and not utils.IsLvmEnabled(enabled_disk_templates):
      ToStdout("You specified a volume group with --vg-name, but you did not"
               " enable any of the following lvm-based disk templates: %s" %
               utils.CommaJoin(constants.DTS_LVM))
  return vg_name


def _GetDrbdHelper(opts, enabled_disk_templates):
  """Determine the DRBD usermode helper.

  """
  drbd_helper = opts.drbd_helper
  if enabled_disk_templates:
    drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
    if not drbd_enabled and opts.drbd_helper:
      ToStdout("You specified a DRBD usermode helper with"
               " --drbd-usermode-helper while DRBD is not enabled.")
  return drbd_helper


def SetClusterParams(opts, args):
  """Modify the cluster.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if not (opts.vg_name is not None or
          opts.drbd_helper is not None or
          opts.enabled_hypervisors or opts.hvparams or
          opts.beparams or opts.nicparams or
          opts.ndparams or opts.diskparams or
          opts.candidate_pool_size is not None or
          opts.uid_pool is not None or
          opts.maintain_node_health is not None or
          opts.add_uids is not None or
          opts.remove_uids is not None or
          opts.default_iallocator is not None or
          opts.default_iallocator_params or
          opts.reserved_lvs is not None or
          opts.master_netdev is not None or
          opts.master_netmask is not None or
          opts.use_external_mip_script is not None or
          opts.prealloc_wipe_disks is not None or
          opts.hv_state or
          opts.enabled_disk_templates or
          opts.disk_state or
          opts.ipolicy_bounds_specs is not None or
          opts.ipolicy_std_specs is not None or
          opts.ipolicy_disk_templates is not None or
          opts.ipolicy_vcpu_ratio is not None or
          opts.ipolicy_spindle_ratio is not None or
          opts.modify_etc_hosts is not None or
          opts.file_storage_dir is not None):
    ToStderr("Please give at least one of the parameters.")
    return 1

  enabled_disk_templates = _GetEnabledDiskTemplates(opts)
  vg_name = _GetVgName(opts, enabled_disk_templates)

  try:
    drbd_helper = _GetDrbdHelper(opts, enabled_disk_templates)
  except errors.OpPrereqError, e:
    ToStderr(str(e))
    return 1

  hvlist = opts.enabled_hypervisors
  if hvlist is not None:
    hvlist = hvlist.split(",")

  # a list of (name, dict) we can pass directly to dict() (or [])
  hvparams = dict(opts.hvparams)
  for hv_params in hvparams.values():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)

  diskparams = dict(opts.diskparams)

  for dt_params in diskparams.values():
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)

  beparams = opts.beparams
  utils.ForceDictType(beparams, constants.BES_PARAMETER_COMPAT)

  nicparams = opts.nicparams
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  ndparams = opts.ndparams
  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)

  ipolicy = CreateIPolicyFromOpts(
    minmax_ispecs=opts.ipolicy_bounds_specs,
    std_ispecs=opts.ipolicy_std_specs,
    ipolicy_disk_templates=opts.ipolicy_disk_templates,
    ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
    ipolicy_spindle_ratio=opts.ipolicy_spindle_ratio,
    )

  mnh = opts.maintain_node_health

  uid_pool = opts.uid_pool
  if uid_pool is not None:
    uid_pool = uidpool.ParseUidPool(uid_pool)

  add_uids = opts.add_uids
  if add_uids is not None:
    add_uids = uidpool.ParseUidPool(add_uids)

  remove_uids = opts.remove_uids
  if remove_uids is not None:
    remove_uids = uidpool.ParseUidPool(remove_uids)

  if opts.reserved_lvs is not None:
    if opts.reserved_lvs == "":
      opts.reserved_lvs = []
    else:
      opts.reserved_lvs = utils.UnescapeAndSplit(opts.reserved_lvs, sep=",")

  if opts.master_netmask is not None:
    try:
      opts.master_netmask = int(opts.master_netmask)
    except ValueError:
      ToStderr("The --master-netmask option expects an int parameter.")
      return 1

  ext_ip_script = opts.use_external_mip_script

  if opts.disk_state:
    disk_state = utils.FlatToDict(opts.disk_state)
  else:
    disk_state = {}

  hv_state = dict(opts.hv_state)

  op = opcodes.OpClusterSetParams(
    vg_name=vg_name,
    drbd_helper=drbd_helper,
    enabled_hypervisors=hvlist,
    hvparams=hvparams,
    os_hvp=None,
    beparams=beparams,
    nicparams=nicparams,
    ndparams=ndparams,
    diskparams=diskparams,
    ipolicy=ipolicy,
    candidate_pool_size=opts.candidate_pool_size,
    maintain_node_health=mnh,
    modify_etc_hosts=opts.modify_etc_hosts,
    uid_pool=uid_pool,
    add_uids=add_uids,
    remove_uids=remove_uids,
    default_iallocator=opts.default_iallocator,
    default_iallocator_params=opts.default_iallocator_params,
    prealloc_wipe_disks=opts.prealloc_wipe_disks,
    master_netdev=opts.master_netdev,
    master_netmask=opts.master_netmask,
    reserved_lvs=opts.reserved_lvs,
    use_external_mip_script=ext_ip_script,
    hv_state=hv_state,
    disk_state=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    force=opts.force,
    file_storage_dir=opts.file_storage_dir,
    )
  SubmitOrSend(op, opts)
  return 0


def QueueOps(opts, args):
  """Queue operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()
  if command in ("drain", "undrain"):
    drain_flag = command == "drain"
    client.SetQueueDrainFlag(drain_flag)
  elif command == "info":
    result = client.QueryConfigValues(["drain_flag"])
    if result[0]:
      val = "set"
    else:
      val = "unset"
    ToStdout("The drain flag is %s" % val)
  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
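# Example subcommands handled above (editor's note; in Ganeti these are
# reached via "gnt-cluster queue <subcommand>"):
#   gnt-cluster queue drain     # stop accepting new job submissions
#   gnt-cluster queue undrain   # accept job submissions again
#   gnt-cluster queue info      # show the current drain flag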


def _ShowWatcherPause(until):
  if until is None or until < time.time():
    ToStdout("The watcher is not paused.")
  else:
    ToStdout("The watcher is paused until %s.", time.ctime(until))


def WatcherOps(opts, args):
  """Watcher operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  command = args[0]
  client = GetClient()

  if command == "continue":
    client.SetWatcherPause(None)
    ToStdout("The watcher is no longer paused.")

  elif command == "pause":
    if len(args) < 2:
      raise errors.OpPrereqError("Missing pause duration", errors.ECODE_INVAL)

    result = client.SetWatcherPause(time.time() + ParseTimespec(args[1]))
    _ShowWatcherPause(result)

  elif command == "info":
    result = client.QueryConfigValues(["watcher_pause"])
    _ShowWatcherPause(result[0])

  else:
    raise errors.OpPrereqError("Command '%s' is not valid." % command,
                               errors.ECODE_INVAL)

  return 0
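# Example subcommands handled above (editor's note):
#   gnt-cluster watcher pause 1h   # duration parsed by ParseTimespec
#   gnt-cluster watcher continue
#   gnt-cluster watcher info
# The "1h" timespec syntax is an assumption; see the gnt-cluster(8) man page
# for the authoritative format.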


def _OobPower(opts, node_list, power):
  """Puts the nodes in the list into the desired power state.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on
  @param power: True if they should be powered on, False otherwise
  @return: The success of the operation (none failed)

  """
  if power:
    command = constants.OOB_POWER_ON
  else:
    command = constants.OOB_POWER_OFF

  op = opcodes.OpOobCommand(node_names=node_list,
                            command=command,
                            ignore_status=True,
                            timeout=opts.oob_timeout,
                            power_delay=opts.power_delay)
  result = SubmitOpCode(op, opts=opts)
  errs = 0
  for node_result in result:
    (node_tuple, data_tuple) = node_result
    (_, node_name) = node_tuple
    (data_status, _) = data_tuple
    if data_status != constants.RS_NORMAL:
      assert data_status != constants.RS_UNAVAIL
      errs += 1
      ToStderr("There was a problem changing power for %s, please investigate",
               node_name)

  if errs > 0:
    return False

  return True


def _InstanceStart(opts, inst_list, start, no_remember=False):
  """Puts the instances in the list into the desired state.

  @param opts: The command line options selected by the user
  @param inst_list: The list of instances to operate on
  @param start: True if they should be started, False for shutdown
  @param no_remember: If the instance state should be remembered
  @return: The success of the operation (none failed)

  """
  if start:
    opcls = opcodes.OpInstanceStartup
    text_submit, text_success, text_failed = ("startup", "started", "starting")
  else:
    opcls = compat.partial(opcodes.OpInstanceShutdown,
                           timeout=opts.shutdown_timeout,
                           no_remember=no_remember)
    text_submit, text_success, text_failed = ("shutdown", "stopped", "stopping")

  jex = JobExecutor(opts=opts)

  for inst in inst_list:
    ToStdout("Submit %s of instance %s", text_submit, inst)
    op = opcls(instance_name=inst)
    jex.QueueJob(inst, op)

  results = jex.GetResults()
  bad_cnt = len([1 for (success, _) in results if not success])

  if bad_cnt == 0:
    ToStdout("All instances have been %s successfully", text_success)
  else:
    ToStderr("There were errors while %s instances:\n"
             "%d error(s) out of %d instance(s)", text_failed, bad_cnt,
             len(results))
    return False

  return True


class _RunWhenNodesReachableHelper:
  """Helper class to make sharing internal state easier.

  @ivar success: Indicates if all action_cb calls were successful

  """
  def __init__(self, node_list, action_cb, node2ip, port, feedback_fn,
               _ping_fn=netutils.TcpPing, _sleep_fn=time.sleep):
    """Init the object.

    @param node_list: The list of nodes to be reachable
    @param action_cb: Callback called when a new host is reachable
    @type node2ip: dict
    @param node2ip: Node to ip mapping
    @param port: The port to use for the TCP ping
    @param feedback_fn: The function used for feedback
    @param _ping_fn: Function to check reachability (for unittest use only)
    @param _sleep_fn: Function to sleep (for unittest use only)

    """
    self.down = set(node_list)
    self.up = set()
    self.node2ip = node2ip
    self.success = True
    self.action_cb = action_cb
    self.port = port
    self.feedback_fn = feedback_fn
    self._ping_fn = _ping_fn
    self._sleep_fn = _sleep_fn

  def __call__(self):
    """When called we run action_cb.

    @raises utils.RetryAgain: When there are still down nodes

    """
    if not self.action_cb(self.up):
      self.success = False

    if self.down:
      raise utils.RetryAgain()
    else:
      return self.success

  def Wait(self, secs):
    """Checks if a host is up or waits remaining seconds.

    @param secs: The secs remaining

    """
    start = time.time()
    for node in self.down:
      if self._ping_fn(self.node2ip[node], self.port, timeout=_EPO_PING_TIMEOUT,
                       live_port_needed=True):
        self.feedback_fn("Node %s became available" % node)
        self.up.add(node)
        self.down -= self.up
        # If we have a node available there is the possibility to run the
        # action callback successfully, therefore we don't wait and return
        return

    self._sleep_fn(max(0.0, start + secs - time.time()))
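# Editor's note on the retry protocol above: utils.Retry() (used in
# _RunWhenNodesReachable below) calls the helper instance itself on every
# attempt and helper.Wait() in between.  Wait() pings the still-down nodes
# and returns early as soon as one comes up; __call__ runs action_cb on the
# nodes seen so far and keeps raising RetryAgain until no node is down.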
1436

    
1437

    
1438
def _RunWhenNodesReachable(node_list, action_cb, interval):
1439
  """Run action_cb when nodes become reachable.
1440

1441
  @param node_list: The list of nodes to be reachable
1442
  @param action_cb: Callback called when a new host is reachable
1443
  @param interval: The earliest time to retry
1444

1445
  """
1446
  client = GetClient()
1447
  cluster_info = client.QueryClusterInfo()
1448
  if cluster_info["primary_ip_version"] == constants.IP4_VERSION:
1449
    family = netutils.IPAddress.family
1450
  else:
1451
    family = netutils.IP6Address.family
1452

    
1453
  node2ip = dict((node, netutils.GetHostname(node, family=family).ip)
1454
                 for node in node_list)
1455

    
1456
  port = netutils.GetDaemonPort(constants.NODED)
1457
  helper = _RunWhenNodesReachableHelper(node_list, action_cb, node2ip, port,
1458
                                        ToStdout)
1459

    
1460
  try:
1461
    return utils.Retry(helper, interval, _EPO_REACHABLE_TIMEOUT,
1462
                       wait_fn=helper.Wait)
1463
  except utils.RetryTimeout:
1464
    ToStderr("Time exceeded while waiting for nodes to become reachable"
1465
             " again:\n  - %s", "  - ".join(helper.down))
1466
    return False
1467

    
1468

    
1469
def _MaybeInstanceStartup(opts, inst_map, nodes_online,
1470
                          _instance_start_fn=_InstanceStart):
1471
  """Start the instances conditional based on node_states.
1472

1473
  @param opts: The command line options selected by the user
1474
  @param inst_map: A dict of inst -> nodes mapping
1475
  @param nodes_online: A list of nodes online
1476
  @param _instance_start_fn: Callback to start instances (unittest use only)
1477
  @return: Success of the operation on all instances
1478

1479
  """
1480
  start_inst_list = []
1481
  for (inst, nodes) in inst_map.items():
1482
    if not (nodes - nodes_online):
1483
      # All nodes the instance lives on are back online
1484
      start_inst_list.append(inst)
1485

    
1486
  for inst in start_inst_list:
1487
    del inst_map[inst]
1488

    
1489
  if start_inst_list:
1490
    return _instance_start_fn(opts, start_inst_list, True)
1491

    
1492
  return True


def _EpoOn(opts, full_node_list, node_list, inst_map):
  """Does the actual power on.

  @param opts: The command line options selected by the user
  @param full_node_list: All nodes to operate on (includes nodes not supporting
                         OOB)
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  if node_list and not _OobPower(opts, node_list, False):
    ToStderr("Not all nodes seem to get back up, investigate and start"
             " manually if needed")

  # Wait for the nodes to be back up
  action_cb = compat.partial(_MaybeInstanceStartup, opts, dict(inst_map))

  ToStdout("Waiting until all nodes are available again")
  if not _RunWhenNodesReachable(full_node_list, action_cb, _EPO_PING_INTERVAL):
    ToStderr("Please investigate and start stopped instances manually")
    return constants.EXIT_FAILURE

  return constants.EXIT_SUCCESS


def _EpoOff(opts, node_list, inst_map):
  """Does the actual power off.

  @param opts: The command line options selected by the user
  @param node_list: The list of nodes to operate on (all need to support OOB)
  @param inst_map: A dict of inst -> nodes mapping
  @return: The desired exit status

  """
  if not _InstanceStart(opts, inst_map.keys(), False, no_remember=True):
    ToStderr("Please investigate and stop instances manually before continuing")
    return constants.EXIT_FAILURE

  if not node_list:
    return constants.EXIT_SUCCESS

  if _OobPower(opts, node_list, False):
    return constants.EXIT_SUCCESS
  else:
    return constants.EXIT_FAILURE


def Epo(opts, args, qcl=None, _on_fn=_EpoOn, _off_fn=_EpoOff,
        _confirm_fn=ConfirmOperation,
        _stdout_fn=ToStdout, _stderr_fn=ToStderr):
  """EPO operations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the subcommand
  @rtype: int
  @return: the desired exit code

  """
  if opts.groups and opts.show_all:
    _stderr_fn("Only one of --groups or --all is allowed")
    return constants.EXIT_FAILURE
  elif args and opts.show_all:
    _stderr_fn("Arguments in combination with --all are not allowed")
    return constants.EXIT_FAILURE

  if qcl is None:
    # Query client
    qcl = GetClient(query=True)

  if opts.groups:
    node_query_list = \
      itertools.chain(*qcl.QueryGroups(args, ["node_list"], False))
  else:
    node_query_list = args

  result = qcl.QueryNodes(node_query_list, ["name", "master", "pinst_list",
                                            "sinst_list", "powered", "offline"],
                          False)

  all_nodes = map(compat.fst, result)
  node_list = []
  inst_map = {}
  for (node, master, pinsts, sinsts, powered, offline) in result:
    if not offline:
      for inst in (pinsts + sinsts):
        if inst in inst_map:
          if not master:
            inst_map[inst].add(node)
        elif master:
          inst_map[inst] = set()
        else:
          inst_map[inst] = set([node])

    if master and opts.on:
      # We ignore the master for turning on the machines, in fact we are
      # already operating on the master at this point :)
      continue
    elif master and not opts.show_all:
      _stderr_fn("%s is the master node, please do a master-failover to another"
                 " node not affected by the EPO or use --all if you intend to"
                 " shutdown the whole cluster", node)
      return constants.EXIT_FAILURE
    elif powered is None:
      _stdout_fn("Node %s does not support out-of-band handling, it can not be"
                 " handled in a fully automated manner", node)
    elif powered == opts.on:
      _stdout_fn("Node %s is already in desired power state, skipping", node)
    elif not offline or (offline and powered):
      node_list.append(node)

  if not (opts.force or _confirm_fn(all_nodes, "nodes", "epo")):
    return constants.EXIT_FAILURE

  if opts.on:
    return _on_fn(opts, all_nodes, node_list, inst_map)
  else:
    return _off_fn(opts, node_list, inst_map)
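
# Illustrative CLI usage of the "epo" command (node names hypothetical):
#   gnt-cluster epo --all             # power off the whole cluster
#   gnt-cluster epo --on node1 node2  # power the given nodes back on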


def _GetCreateCommand(info):
  buf = StringIO()
  buf.write("gnt-cluster init")
  PrintIPolicyCommand(buf, info["ipolicy"], False)
  buf.write(" ")
  buf.write(info["name"])
  return buf.getvalue()


def ShowCreateCommand(opts, args):
  """Shows the command that can be used to re-create the cluster.

  Currently it works only for ipolicy specs.

  """
  cl = GetClient(query=True)
  result = cl.QueryClusterInfo()
  ToStdout(_GetCreateCommand(result))
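
# Example output of "gnt-cluster show-ispecs-cmd" (shape only; the actual
# ipolicy options depend on the cluster's configuration):
#   gnt-cluster init <ipolicy options> cluster.example.com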


def _RunCommandAndReport(cmd):
  """Run a command and report its output, iff it failed.

  @param cmd: the command to execute
  @type cmd: list
  @rtype: bool
  @return: False, if the execution failed.

  """
  result = utils.RunCmd(cmd)
  if result.failed:
    ToStderr("Command %s failed: %s; Output %s" %
             (cmd, result.fail_reason, result.output))
    return False
  return True


def _VerifyCommand(cmd):
  """Verify that a given command succeeds on all online nodes.

  As this function is intended to run during upgrades, it
  is implemented in such a way that it still works, if all Ganeti
  daemons are down.

  @param cmd: the command to execute
  @type cmd: list
  @rtype: list
  @return: the list of node names that are online where
      the command failed.

  """
  command = utils.text.ShellQuoteArgs([str(val) for val in cmd])

  nodes = ssconf.SimpleStore().GetOnlineNodeList()
  master_node = ssconf.SimpleStore().GetMasterNode()
  cluster_name = ssconf.SimpleStore().GetClusterName()

  # If master node is in 'nodes', make sure master node is at list end
  if master_node in nodes:
    nodes.remove(master_node)
    nodes.append(master_node)

  failed = []

  srun = ssh.SshRunner(cluster_name=cluster_name)
  for name in nodes:
    result = srun.Run(name, constants.SSH_LOGIN_USER, command)
    if result.exit_code != 0:
      failed.append(name)

  return failed


def _VerifyVersionInstalled(versionstring):
  """Verify that the given version of ganeti is installed on all online nodes.

  Do nothing, if this is the case, otherwise print an appropriate
  message to stderr.

  @param versionstring: the version to check for
  @type versionstring: string
  @rtype: bool
  @return: True, if the version is installed on all online nodes

  """
  badnodes = _VerifyCommand(["test", "-d",
                             os.path.join(pathutils.PKGLIBDIR, versionstring)])
  if badnodes:
    ToStderr("Ganeti version %s not installed on nodes %s"
             % (versionstring, ", ".join(badnodes)))
    return False

  return True


def _GetRunning():
  """Determine the number of currently running jobs.

  @rtype: int
  @return: the number of jobs still running

  """
  cl = GetClient()
  qfilter = qlang.MakeSimpleFilter("status",
                                   frozenset([constants.JOB_STATUS_RUNNING]))
  return len(cl.Query(constants.QR_JOB, [], qfilter).data)


def _SetGanetiVersion(versionstring):
  """Set the active version of ganeti to the given versionstring

  @type versionstring: string
  @rtype: list
  @return: the list of nodes where the version change failed

  """
  failed = []
  if constants.HAS_GNU_LN:
    failed.extend(_VerifyCommand(
        ["ln", "-s", "-f", "-T",
         os.path.join(pathutils.PKGLIBDIR, versionstring),
         os.path.join(pathutils.SYSCONFDIR, "ganeti/lib")]))
    failed.extend(_VerifyCommand(
        ["ln", "-s", "-f", "-T",
         os.path.join(pathutils.SHAREDIR, versionstring),
         os.path.join(pathutils.SYSCONFDIR, "ganeti/share")]))
  else:
    failed.extend(_VerifyCommand(
        ["rm", "-f", os.path.join(pathutils.SYSCONFDIR, "ganeti/lib")]))
    failed.extend(_VerifyCommand(
        ["ln", "-s", "-f", os.path.join(pathutils.PKGLIBDIR, versionstring),
         os.path.join(pathutils.SYSCONFDIR, "ganeti/lib")]))
    failed.extend(_VerifyCommand(
        ["rm", "-f", os.path.join(pathutils.SYSCONFDIR, "ganeti/share")]))
    failed.extend(_VerifyCommand(
        ["ln", "-s", "-f", os.path.join(pathutils.SHAREDIR, versionstring),
         os.path.join(pathutils.SYSCONFDIR, "ganeti/share")]))
  return list(set(failed))
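
# Illustrative result of _SetGanetiVersion("2.11"), assuming the default
# build-time paths (the actual directories depend on how Ganeti was
# configured): every node ends up with symlinks like
#   /etc/ganeti/lib   -> /usr/lib/ganeti/2.11
#   /etc/ganeti/share -> /usr/share/ganeti/2.11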


def _ExecuteCommands(fns):
  """Execute a list of functions, in reverse order.

  @type fns: list of functions.
  @param fns: the functions to be executed.

  """
  for fn in reversed(fns):
    fn()
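
# Rollback lists are built in execution order and undone LIFO, e.g.
# _ExecuteCommands([fn_a, fn_b, fn_c]) runs fn_c, then fn_b, then fn_a.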


def _GetConfigVersion():
  """Determine the version the configuration file currently has.

  @rtype: tuple or None
  @return: (major, minor, revision) if the version can be determined,
      None otherwise

  """
  config_data = serializer.LoadJson(utils.ReadFile(pathutils.CLUSTER_CONF_FILE))
  try:
    config_version = config_data["version"]
  except KeyError:
    return None
  return utils.SplitVersion(config_version)
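
# Example (assuming Ganeti's usual integer version encoding of
# major * 1000000 + minor * 10000 + revision): a "version" field of 2100000
# would be split into the tuple (2, 10, 0).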


def _ReadIntentToUpgrade():
  """Read the file documenting the intent to upgrade the cluster.

  @rtype: string or None
  @return: the version to upgrade to, if the file exists, and None
      otherwise.

  """
  if not os.path.isfile(pathutils.INTENT_TO_UPGRADE):
    return None

  contentstring = utils.ReadFile(pathutils.INTENT_TO_UPGRADE)
  contents = utils.UnescapeAndSplit(contentstring)
  if len(contents) != 2:
    # file syntactically mal-formed
    return None
  return contents[0]


def _WriteIntentToUpgrade(version):
  """Write file documenting the intent to upgrade the cluster.

  @type version: string
  @param version: the version we intend to upgrade to

  """
  utils.WriteFile(pathutils.INTENT_TO_UPGRADE,
                  data=utils.EscapeAndJoin([version, "%d" % os.getpid()]))
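
# The intent-to-upgrade file thus holds two fields, the target version and
# the PID of the writing process, joined with utils.EscapeAndJoin's default
# separator; _ReadIntentToUpgrade only returns the first field (the version).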


def _UpgradeBeforeConfigurationChange(versionstring):
  """
  Carry out all the tasks necessary for an upgrade that happen before
  the configuration file, or Ganeti version, changes.

  @type versionstring: string
  @param versionstring: the version to upgrade to
  @rtype: (bool, list)
  @return: tuple of a bool indicating success and a list of rollback tasks

  """
  rollback = []

  if not _VerifyVersionInstalled(versionstring):
    return (False, rollback)

  _WriteIntentToUpgrade(versionstring)
  rollback.append(
    lambda: utils.RunCmd(["rm", "-f", pathutils.INTENT_TO_UPGRADE]))

  ToStdout("Draining queue")
  client = GetClient()
  client.SetQueueDrainFlag(True)

  rollback.append(lambda: GetClient().SetQueueDrainFlag(False))

  if utils.SimpleRetry(0, _GetRunning,
                       constants.UPGRADE_QUEUE_POLL_INTERVAL,
                       constants.UPGRADE_QUEUE_DRAIN_TIMEOUT):
    ToStderr("Failed to completely empty the queue.")
    return (False, rollback)

  ToStdout("Stopping daemons on master node.")
  if not _RunCommandAndReport([pathutils.DAEMON_UTIL, "stop-all"]):
    return (False, rollback)

  if not _VerifyVersionInstalled(versionstring):
    utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
    return (False, rollback)

  ToStdout("Stopping daemons everywhere.")
  rollback.append(lambda: _VerifyCommand([pathutils.DAEMON_UTIL, "start-all"]))
  badnodes = _VerifyCommand([pathutils.DAEMON_UTIL, "stop-all"])
  if badnodes:
    ToStderr("Failed to stop daemons on %s." % (", ".join(badnodes),))
    return (False, rollback)

  backuptar = os.path.join(pathutils.LOCALSTATEDIR,
                           "lib/ganeti%d.tar" % time.time())
  ToStdout("Backing up configuration as %s" % backuptar)
  if not _RunCommandAndReport(["tar", "cf", backuptar,
                               pathutils.DATA_DIR]):
    return (False, rollback)

  return (True, rollback)


def _VersionSpecificDowngrade():
  """
  Perform any additional downgrade tasks that are version specific
  and need to be done just after the configuration downgrade. This
  function needs to be idempotent, so that it can be redone if the
  downgrade procedure gets interrupted after changing the
  configuration.

  Note that this function has to be reset with every version bump.

  @return: True upon success
  """
  ToStdout("Performing version-specific downgrade tasks.")

  ToStdout("...removing client certificates ssconf file")
  ssconffile = ssconf.SimpleStore().KeyToFilename(
    constants.SS_MASTER_CANDIDATES_CERTS)
  badnodes = _VerifyCommand(["rm", "-f", ssconffile])
  if badnodes:
    ToStderr("Warning: failed to clean up ssconf on %s."
             % (", ".join(badnodes),))
    return False

  ToStdout("...removing client certificates")
  badnodes = _VerifyCommand(["rm", "-f", pathutils.NODED_CLIENT_CERT_FILE])
  if badnodes:
    ToStderr("Warning: failed to clean up certificates on %s."
             % (", ".join(badnodes),))
    return False

  return True


def _SwitchVersionAndConfig(versionstring, downgrade):
  """
  Switch to the new Ganeti version and change the configuration,
  in correct order.

  @type versionstring: string
  @param versionstring: the version to change to
  @type downgrade: bool
  @param downgrade: True, if the configuration should be downgraded
  @rtype: (bool, list)
  @return: tuple of a bool indicating success, and a list of
      additional rollback tasks

  """
  rollback = []
  if downgrade:
    ToStdout("Downgrading configuration")
    if not _RunCommandAndReport([pathutils.CFGUPGRADE, "--downgrade", "-f"]):
      return (False, rollback)
    # Note: version specific downgrades need to be done before switching
    # binaries, so that we still have the knowledgeable binary if the downgrade
    # process gets interrupted at this point.
    if not _VersionSpecificDowngrade():
      return (False, rollback)

  # Configuration change is the point of no return. From then onwards, it is
  # safer to push through the up/downgrade than to try to roll it back.

  ToStdout("Switching to version %s on all nodes" % versionstring)
  rollback.append(lambda: _SetGanetiVersion(constants.DIR_VERSION))
  badnodes = _SetGanetiVersion(versionstring)
  if badnodes:
    ToStderr("Failed to switch to Ganeti version %s on nodes %s"
             % (versionstring, ", ".join(badnodes)))
    if not downgrade:
      return (False, rollback)

  # Now that we have changed to the new version of Ganeti we should
  # not communicate over luxi any more, as luxi might have changed in
  # incompatible ways. Therefore, manually call the corresponding ganeti
  # commands using their canonical (version independent) path.

  if not downgrade:
    ToStdout("Upgrading configuration")
    if not _RunCommandAndReport([pathutils.CFGUPGRADE, "-f"]):
      return (False, rollback)

  return (True, rollback)


def _UpgradeAfterConfigurationChange():
  """
  Carry out the upgrade actions necessary after switching to the new
  Ganeti version and updating the configuration.

  As this part is run at a time where the new version of Ganeti is already
  running, no communication should happen via luxi, as this is not a stable
  interface. Also, as the configuration change is the point of no return,
  all actions are pushed through, even if some of them fail.

  @rtype: int
  @return: the intended return value

  """
  returnvalue = 0

  ToStdout("Starting daemons everywhere.")
  badnodes = _VerifyCommand([pathutils.DAEMON_UTIL, "start-all"])
  if badnodes:
    ToStderr("Warning: failed to start daemons on %s." % (", ".join(badnodes),))
    returnvalue = 1

  ToStdout("Ensuring directories everywhere.")
  badnodes = _VerifyCommand([pathutils.ENSURE_DIRS])
  if badnodes:
    ToStderr("Warning: failed to ensure directories on %s." %
             (", ".join(badnodes)))
    returnvalue = 1

  ToStdout("Redistributing the configuration.")
  if not _RunCommandAndReport(["gnt-cluster", "redist-conf", "--yes-do-it"]):
    returnvalue = 1

  ToStdout("Restarting daemons everywhere.")
  badnodes = _VerifyCommand([pathutils.DAEMON_UTIL, "stop-all"])
  badnodes.extend(_VerifyCommand([pathutils.DAEMON_UTIL, "start-all"]))
  if badnodes:
    ToStderr("Warning: failed to start daemons on %s." %
             (", ".join(list(set(badnodes))),))
    returnvalue = 1

  ToStdout("Undraining the queue.")
  if not _RunCommandAndReport(["gnt-cluster", "queue", "undrain"]):
    returnvalue = 1

  _RunCommandAndReport(["rm", "-f", pathutils.INTENT_TO_UPGRADE])

  ToStdout("Verifying cluster.")
  if not _RunCommandAndReport(["gnt-cluster", "verify"]):
    returnvalue = 1

  return returnvalue


def UpgradeGanetiCommand(opts, args):
  """Upgrade a cluster to a new ganeti version.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  if ((not opts.resume and opts.to is None)
      or (opts.resume and opts.to is not None)):
    ToStderr("Precisely one of the options --to and --resume"
             " has to be given")
    return 1

  if opts.resume:
    ssconf.CheckMaster(False)
    versionstring = _ReadIntentToUpgrade()
    if versionstring is None:
      return 0
    version = utils.version.ParseVersion(versionstring)
    if version is None:
      return 1
    configversion = _GetConfigVersion()
    if configversion is None:
      return 1
    # If the upgrade we resume was an upgrade between compatible
    # versions (like 2.10.0 to 2.10.1), the correct configversion
    # does not guarantee that the config has been updated.
    # However, in the case of a compatible update with the configuration
    # not touched, we are running a different dirversion with the same
    # config version.
    config_already_modified = \
      (utils.IsCorrectConfigVersion(version, configversion) and
       not (versionstring != constants.DIR_VERSION and
            configversion == (constants.CONFIG_MAJOR, constants.CONFIG_MINOR,
                              constants.CONFIG_REVISION)))
    if not config_already_modified:
      # We have to start from the beginning; however, some daemons might have
      # already been stopped, so the only way to get into a well-defined state
      # is by starting all daemons again.
      _VerifyCommand([pathutils.DAEMON_UTIL, "start-all"])
  else:
    versionstring = opts.to
    config_already_modified = False
    version = utils.version.ParseVersion(versionstring)
    if version is None:
      ToStderr("Could not parse version string %s" % versionstring)
      return 1

  msg = utils.version.UpgradeRange(version)
  if msg is not None:
    ToStderr("Cannot upgrade to %s: %s" % (versionstring, msg))
    return 1

  if not config_already_modified:
    success, rollback = _UpgradeBeforeConfigurationChange(versionstring)
    if not success:
      _ExecuteCommands(rollback)
      return 1
  else:
    rollback = []

  downgrade = utils.version.ShouldCfgdowngrade(version)

  success, additionalrollback = \
      _SwitchVersionAndConfig(versionstring, downgrade)
  if not success:
    rollback.extend(additionalrollback)
    _ExecuteCommands(rollback)
    return 1

  return _UpgradeAfterConfigurationChange()
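
# Illustrative CLI usage (the version number is hypothetical):
#   gnt-cluster upgrade --to 2.12   # upgrade (or downgrade) to version 2.12
#   gnt-cluster upgrade --resume    # resume a previously interrupted upgrade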


commands = {
  "init": (
    InitCluster, [ArgHost(min=1, max=1)],
    [BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, GLOBAL_FILEDIR_OPT,
     HVLIST_OPT, MAC_PREFIX_OPT, MASTER_NETDEV_OPT, MASTER_NETMASK_OPT,
     NIC_PARAMS_OPT, NOMODIFY_ETCHOSTS_OPT, NOMODIFY_SSH_SETUP_OPT,
     SECONDARY_IP_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT, UIDPOOL_OPT,
     DRBD_HELPER_OPT, DEFAULT_IALLOCATOR_OPT, DEFAULT_IALLOCATOR_PARAMS_OPT,
     PRIMARY_IP_VERSION_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT,
     GLOBAL_SHARED_FILEDIR_OPT, USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT,
     HV_STATE_OPT, DISK_STATE_OPT, ENABLED_DISK_TEMPLATES_OPT,
     IPOLICY_STD_SPECS_OPT, GLOBAL_GLUSTER_FILEDIR_OPT]
     + INSTANCE_POLICY_OPTS + SPLIT_ISPECS_OPTS,
    "[opts...] <cluster_name>", "Initialises a new cluster configuration"),
  "destroy": (
    DestroyCluster, ARGS_NONE, [YES_DOIT_OPT],
    "", "Destroy cluster"),
  "rename": (
    RenameCluster, [ArgHost(min=1, max=1)],
    [FORCE_OPT, DRY_RUN_OPT],
    "<new_name>",
    "Renames the cluster"),
  "redist-conf": (
    RedistributeConfig, ARGS_NONE, SUBMIT_OPTS +
    [DRY_RUN_OPT, PRIORITY_OPT, FORCE_DISTRIBUTION],
    "", "Forces a push of the configuration file and ssconf files"
    " to the nodes in the cluster"),
  "verify": (
    VerifyCluster, ARGS_NONE,
    [VERBOSE_OPT, DEBUG_SIMERR_OPT, ERROR_CODES_OPT, NONPLUS1_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, NODEGROUP_OPT, IGNORE_ERRORS_OPT],
    "", "Does a check on the cluster configuration"),
  "verify-disks": (
    VerifyDisks, ARGS_NONE, [PRIORITY_OPT],
    "", "Does a check on the cluster disk status"),
  "repair-disk-sizes": (
    RepairDiskSizes, ARGS_MANY_INSTANCES, [DRY_RUN_OPT, PRIORITY_OPT],
    "[instance...]", "Updates mismatches in recorded disk sizes"),
  "master-failover": (
    MasterFailover, ARGS_NONE, [NOVOTING_OPT, FORCE_FAILOVER],
    "", "Makes the current node the master"),
  "master-ping": (
    MasterPing, ARGS_NONE, [],
    "", "Checks if the master is alive"),
  "version": (
    ShowClusterVersion, ARGS_NONE, [],
    "", "Shows the cluster version"),
  "getmaster": (
    ShowClusterMaster, ARGS_NONE, [],
    "", "Shows the cluster master"),
  "copyfile": (
    ClusterCopyFile, [ArgFile(min=1, max=1)],
    [NODE_LIST_OPT, USE_REPL_NET_OPT, NODEGROUP_OPT],
    "[-n node...] <filename>", "Copies a file to all (or only some) nodes"),
  "command": (
    RunClusterCommand, [ArgCommand(min=1)],
    [NODE_LIST_OPT, NODEGROUP_OPT, SHOW_MACHINE_OPT, FAILURE_ONLY_OPT],
    "[-n node...] <command>", "Runs a command on all (or only some) nodes"),
  "info": (
    ShowClusterConfig, ARGS_NONE, [ROMAN_OPT],
    "[--roman]", "Show cluster configuration"),
  "list-tags": (
    ListTags, ARGS_NONE, [], "", "List the tags of the cluster"),
  "add-tags": (
    AddTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "tag...", "Add tags to the cluster"),
  "remove-tags": (
    RemoveTags, [ArgUnknown()], [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "tag...", "Remove tags from the cluster"),
  "search-tags": (
    SearchTags, [ArgUnknown(min=1, max=1)], [PRIORITY_OPT], "",
    "Searches the tags on all objects on"
    " the cluster for a given pattern (regex)"),
  "queue": (
    QueueOps,
    [ArgChoice(min=1, max=1, choices=["drain", "undrain", "info"])],
    [], "drain|undrain|info", "Change queue properties"),
  "watcher": (
    WatcherOps,
    [ArgChoice(min=1, max=1, choices=["pause", "continue", "info"]),
     ArgSuggest(min=0, max=1, choices=["30m", "1h", "4h"])],
    [],
    "{pause <timespec>|continue|info}", "Change watcher properties"),
  "modify": (
    SetClusterParams, ARGS_NONE,
    [FORCE_OPT,
     BACKEND_OPT, CP_SIZE_OPT, ENABLED_HV_OPT, HVLIST_OPT, MASTER_NETDEV_OPT,
     MASTER_NETMASK_OPT, NIC_PARAMS_OPT, VG_NAME_OPT, MAINTAIN_NODE_HEALTH_OPT,
     UIDPOOL_OPT, ADD_UIDS_OPT, REMOVE_UIDS_OPT, DRBD_HELPER_OPT,
     DEFAULT_IALLOCATOR_OPT, DEFAULT_IALLOCATOR_PARAMS_OPT, RESERVED_LVS_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, PREALLOC_WIPE_DISKS_OPT, NODE_PARAMS_OPT,
     USE_EXTERNAL_MIP_SCRIPT, DISK_PARAMS_OPT, HV_STATE_OPT, DISK_STATE_OPT] +
     SUBMIT_OPTS +
     [ENABLED_DISK_TEMPLATES_OPT, IPOLICY_STD_SPECS_OPT, MODIFY_ETCHOSTS_OPT] +
     INSTANCE_POLICY_OPTS + [GLOBAL_FILEDIR_OPT],
    "[opts...]",
    "Alters the parameters of the cluster"),
  "renew-crypto": (
    RenewCrypto, ARGS_NONE,
    [NEW_CLUSTER_CERT_OPT, NEW_RAPI_CERT_OPT, RAPI_CERT_OPT,
     NEW_CONFD_HMAC_KEY_OPT, FORCE_OPT,
     NEW_CLUSTER_DOMAIN_SECRET_OPT, CLUSTER_DOMAIN_SECRET_OPT,
     NEW_SPICE_CERT_OPT, SPICE_CERT_OPT, SPICE_CACERT_OPT,
     NEW_NODE_CERT_OPT],
    "[opts...]",
    "Renews cluster certificates, keys and secrets"),
  "epo": (
    Epo, [ArgUnknown()],
    [FORCE_OPT, ON_OPT, GROUPS_OPT, ALL_OPT, OOB_TIMEOUT_OPT,
     SHUTDOWN_TIMEOUT_OPT, POWER_DELAY_OPT],
    "[opts...] [args]",
    "Performs an emergency power-off on given args"),
  "activate-master-ip": (
    ActivateMasterIp, ARGS_NONE, [], "", "Activates the master IP"),
  "deactivate-master-ip": (
    DeactivateMasterIp, ARGS_NONE, [CONFIRM_OPT], "",
    "Deactivates the master IP"),
  "show-ispecs-cmd": (
    ShowCreateCommand, ARGS_NONE, [], "",
    "Show the command line to re-create the cluster"),
  "upgrade": (
    UpgradeGanetiCommand, ARGS_NONE, [TO_OPT, RESUME_OPT], "",
    "Upgrade (or downgrade) to a new Ganeti version"),
  }


#: dictionary with aliases for commands
aliases = {
  "masterfailover": "master-failover",
  "show": "info",
}
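
# With these aliases, e.g. "gnt-cluster show" behaves exactly like
# "gnt-cluster info", and "gnt-cluster masterfailover" like
# "gnt-cluster master-failover".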


def Main():
  return GenericMain(commands, override={"tag_type": constants.TAG_CLUSTER},
                     aliases=aliases)