lib/cli.py @ 57de31c0
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
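# For illustration only: the specs above combine the min/max fields of
# _Argument, so a hypothetical command accepting one or more node names could
# declare [ArgNode(min=1)], and one taking exactly two instances could declare
# [ArgInstance(min=2, max=2)].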


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
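
# Illustrative example of the parsing rules above (hypothetical input values):
#   _SplitKeyVal("-H", "kernel_path=/vmlinuz,no_acpi,-mem_path")
# returns
#   {"kernel_path": "/vmlinuz", "acpi": False, "mem_path": None}
# i.e. plain key=val pairs keep their string value, "no_"-prefixed keys map to
# False and "-"-prefixed keys map to None.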


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
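
# For illustration, parsing a "--disk"-style value with the function above
# (hypothetical input, the option object is unused and passed as None):
#   check_ident_key_val(None, "--disk", "0:size=10G,mode=rw")
# returns ("0", {"size": "10G", "mode": "rw"}), while a "no_"- or "-"-prefixed
# ident such as "no_link0" returns ("link0", False) and rejects any options.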


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggest is normally a list. Numeric values not evaluating to
# False are used for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
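
# A minimal sketch of how the custom types registered above are used when
# declaring options; the option below is hypothetical, the real definitions
# follow:
#   EXAMPLE_OPT = cli_option("--example-params", dest="example_params",
#                            type="keyval", default={},
#                            help="key=value parameters (parsed by"
#                            " check_key_val)")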


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary IP for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the MAC prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                dest="use_external_mip_script",
                                help="Specify whether to run a user-provided"
                                " script for the master IP address turnup and"
                                " turndown operations",
                                type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                           default=None,
                           help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                           default=None,
                           help="File containing the certificate of the CA"
                                " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                               dest="new_spice_cert", default=None,
                               action="store_true",
                               help=("Generate a new self-signed SPICE"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as a filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# Common options for creating instances. "add" and "import" then add their
# own specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
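
# For illustration, a command's option list is typically built by extending
# this common list; a hypothetical import command could pass
#   COMMON_CREATE_OPTS + [SRC_NODE_OPT, SRC_DIR_OPT]
# as its parser options.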
1317

    
1318

    
1319
def _ParseArgs(argv, commands, aliases, env_override):
1320
  """Parser for the command line arguments.
1321

1322
  This function parses the arguments and returns the function which
1323
  must be executed together with its (modified) arguments.
1324

1325
  @param argv: the command line
1326
  @param commands: dictionary with special contents, see the design
1327
      doc for cmdline handling
1328
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
1329
  @param env_override: list of env variables allowed for default args
1330

1331
  """
1332
  assert not (env_override - set(commands))
1333

    
1334
  if len(argv) == 0:
1335
    binary = "<command>"
1336
  else:
1337
    binary = argv[0].split("/")[-1]
1338

    
1339
  if len(argv) > 1 and argv[1] == "--version":
1340
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1341
             constants.RELEASE_VERSION)
1342
    # Quit right away. That way we don't have to care about this special
1343
    # argument. optparse.py does the same.
1344
    sys.exit(0)
1345

    
1346
  if len(argv) < 2 or not (argv[1] in commands or
1347
                           argv[1] in aliases):
1348
    # let's do a nice thing
1349
    sortedcmds = commands.keys()
1350
    sortedcmds.sort()
1351

    
1352
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1353
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1354
    ToStdout("")
1355

    
1356
    # compute the max line length for cmd + usage
1357
    mlen = max([len(" %s" % cmd) for cmd in commands])
1358
    mlen = min(60, mlen) # should not get here...
1359

    
1360
    # and format a nice command list
1361
    ToStdout("Commands:")
1362
    for cmd in sortedcmds:
1363
      cmdstr = " %s" % (cmd,)
1364
      help_text = commands[cmd][4]
1365
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1366
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1367
      for line in help_lines:
1368
        ToStdout("%-*s   %s", mlen, "", line)
1369

    
1370
    ToStdout("")
1371

    
1372
    return None, None, None
1373

    
1374
  # get command, unalias it, and look it up in commands
1375
  cmd = argv.pop(1)
1376
  if cmd in aliases:
1377
    if cmd in commands:
1378
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
1379
                                   " command" % cmd)
1380

    
1381
    if aliases[cmd] not in commands:
1382
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1383
                                   " command '%s'" % (cmd, aliases[cmd]))
1384

    
1385
    cmd = aliases[cmd]
1386

    
1387
  if cmd in env_override:
1388
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1389
    env_args = os.environ.get(args_env_name)
1390
    if env_args:
1391
      argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))
1392

    
1393
  func, args_def, parser_opts, usage, description = commands[cmd]
1394
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1395
                        description=description,
1396
                        formatter=TitledHelpFormatter(),
1397
                        usage="%%prog %s %s" % (cmd, usage))
1398
  parser.disable_interspersed_args()
1399
  options, args = parser.parse_args(args=argv[1:])
1400

    
1401
  if not _CheckArguments(cmd, args_def, args):
1402
    return None, None, None
1403

    
1404
  return func, options, args
1405

    
1406

    
1407
def _CheckArguments(cmd, args_def, args):
1408
  """Verifies the arguments using the argument definition.
1409

1410
  Algorithm:
1411

1412
    1. Abort with error if values specified by user but none expected.
1413

1414
    1. For each argument in definition
1415

1416
      1. Keep running count of minimum number of values (min_count)
1417
      1. Keep running count of maximum number of values (max_count)
1418
      1. If it has an unlimited number of values
1419

1420
        1. Abort with error if it's not the last argument in the definition
1421

1422
    1. If last argument has limited number of values
1423

1424
      1. Abort with error if number of values doesn't match or is too large
1425

1426
    1. Abort with error if user didn't pass enough values (min_count)
1427

1428
  """
1429
  if args and not args_def:
1430
    ToStderr("Error: Command %s expects no arguments", cmd)
1431
    return False
1432

    
1433
  min_count = None
1434
  max_count = None
1435
  check_max = None
1436

    
1437
  last_idx = len(args_def) - 1
1438

    
1439
  for idx, arg in enumerate(args_def):
1440
    if min_count is None:
1441
      min_count = arg.min
1442
    elif arg.min is not None:
1443
      min_count += arg.min
1444

    
1445
    if max_count is None:
1446
      max_count = arg.max
1447
    elif arg.max is not None:
1448
      max_count += arg.max
1449

    
1450
    if idx == last_idx:
1451
      check_max = (arg.max is not None)
1452

    
1453
    elif arg.max is None:
1454
      raise errors.ProgrammerError("Only the last argument can have max=None")
1455

    
1456
  if check_max:
1457
    # Command with exact number of arguments
1458
    if (min_count is not None and max_count is not None and
1459
        min_count == max_count and len(args) != min_count):
1460
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1461
      return False
1462

    
1463
    # Command with limited number of arguments
1464
    if max_count is not None and len(args) > max_count:
1465
      ToStderr("Error: Command %s expects only %d argument(s)",
1466
               cmd, max_count)
1467
      return False
1468

    
1469
  # Command with some required arguments
1470
  if min_count is not None and len(args) < min_count:
1471
    ToStderr("Error: Command %s expects at least %d argument(s)",
1472
             cmd, min_count)
1473
    return False
1474

    
1475
  return True
1476

    
1477

    
1478
def SplitNodeOption(value):
1479
  """Splits the value of a --node option.
1480

1481
  """
1482
  if value and ":" in value:
1483
    return value.split(":", 1)
1484
  else:
1485
    return (value, None)
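
# Example (illustrative sketch, not part of the original module; node names
# are made up).  Note the asymmetric return types: a two-element list when a
# secondary node is given, a tuple otherwise.
#
#   >>> SplitNodeOption("node1.example.com:node2.example.com")
#   ['node1.example.com', 'node2.example.com']
#   >>> SplitNodeOption("node1.example.com")
#   ('node1.example.com', None)
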
def CalculateOSNames(os_name, os_variants):
1489
  """Calculates all the names an OS can be called, according to its variants.
1490

1491
  @type os_name: string
1492
  @param os_name: base name of the os
1493
  @type os_variants: list or None
1494
  @param os_variants: list of supported variants
1495
  @rtype: list
1496
  @return: list of valid names
1497

1498
  """
1499
  if os_variants:
1500
    return ["%s+%s" % (os_name, v) for v in os_variants]
1501
  else:
1502
    return [os_name]
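
# Example (illustrative sketch; the OS name and variants are hypothetical):
#
#   >>> CalculateOSNames("debootstrap", ["wheezy", "squeeze"])
#   ['debootstrap+wheezy', 'debootstrap+squeeze']
#   >>> CalculateOSNames("debootstrap", None)
#   ['debootstrap']
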
def ParseFields(selected, default):
1506
  """Parses the values of "--field"-like options.
1507

1508
  @type selected: string or None
1509
  @param selected: User-selected options
1510
  @type default: list
1511
  @param default: Default fields
1512

1513
  """
1514
  if selected is None:
1515
    return default
1516

    
1517
  if selected.startswith("+"):
1518
    return default + selected[1:].split(",")
1519

    
1520
  return selected.split(",")
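
# Example (illustrative sketch; field names are hypothetical).  A leading "+"
# extends the defaults instead of replacing them:
#
#   >>> ParseFields(None, ["name", "status"])
#   ['name', 'status']
#   >>> ParseFields("+oper_ram", ["name", "status"])
#   ['name', 'status', 'oper_ram']
#   >>> ParseFields("name,os", ["name", "status"])
#   ['name', 'os']
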
UsesRPC = rpc.RunWithRPC
1524

    
1525

    
1526
def AskUser(text, choices=None):
1527
  """Ask the user a question.
1528

1529
  @param text: the question to ask
1530

1531
  @param choices: list whose elements are tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list
1539

1540
  """
1541
  if choices is None:
1542
    choices = [("y", True, "Perform the operation"),
1543
               ("n", False, "Do not perform the operation")]
1544
  if not choices or not isinstance(choices, list):
1545
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1546
  for entry in choices:
1547
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1548
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1549

    
1550
  answer = choices[-1][1]
1551
  new_text = []
1552
  for line in text.splitlines():
1553
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1554
  text = "\n".join(new_text)
1555
  try:
1556
    f = file("/dev/tty", "a+")
1557
  except IOError:
1558
    return answer
1559
  try:
1560
    chars = [entry[0] for entry in choices]
1561
    chars[-1] = "[%s]" % chars[-1]
1562
    chars.append("?")
1563
    maps = dict([(entry[0], entry[1]) for entry in choices])
1564
    while True:
1565
      f.write(text)
1566
      f.write("\n")
1567
      f.write("/".join(chars))
1568
      f.write(": ")
1569
      line = f.readline(2).strip().lower()
1570
      if line in maps:
1571
        answer = maps[line]
1572
        break
1573
      elif line == "?":
1574
        for entry in choices:
1575
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1576
        f.write("\n")
1577
        continue
1578
  finally:
1579
    f.close()
1580
  return answer
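
# Example (illustrative sketch; prompt text, choice characters and return
# values are made up):
#
#   choices = [("y", True, "Proceed"),
#              ("n", False, "Skip this instance"),
#              ("a", "abort", "Abort the whole operation")]
#   answer = AskUser("Recreate disks for this instance?", choices)
#
# Without a controlling tty the value of the last entry ("abort" here) is
# returned unchanged.
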
class JobSubmittedException(Exception):
1584
  """Job was submitted, client should exit.
1585

1586
  This exception has one argument, the ID of the job that was
1587
  submitted. The handler should print this ID.
1588

1589
  This is not an error, just a structured way to exit from clients.
1590

1591
  """
1592

    
1593

    
1594
def SendJob(ops, cl=None):
1595
  """Function to submit an opcode without waiting for the results.
1596

1597
  @type ops: list
1598
  @param ops: list of opcodes
1599
  @type cl: luxi.Client
1600
  @param cl: the luxi client to use for communicating with the master;
1601
             if None, a new client will be created
1602

1603
  """
1604
  if cl is None:
1605
    cl = GetClient()
1606

    
1607
  job_id = cl.SubmitJob(ops)
1608

    
1609
  return job_id
1610

    
1611

    
1612
def GenericPollJob(job_id, cbs, report_cbs):
1613
  """Generic job-polling function.
1614

1615
  @type job_id: number
1616
  @param job_id: Job ID
1617
  @type cbs: Instance of L{JobPollCbBase}
1618
  @param cbs: Data callbacks
1619
  @type report_cbs: Instance of L{JobPollReportCbBase}
1620
  @param report_cbs: Reporting callbacks
1621

1622
  """
1623
  prev_job_info = None
1624
  prev_logmsg_serial = None
1625

    
1626
  status = None
1627

    
1628
  while True:
1629
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1630
                                      prev_logmsg_serial)
1631
    if not result:
1632
      # job not found, go away!
1633
      raise errors.JobLost("Job with id %s lost" % job_id)
1634

    
1635
    if result == constants.JOB_NOTCHANGED:
1636
      report_cbs.ReportNotChanged(job_id, status)
1637

    
1638
      # Wait again
1639
      continue
1640

    
1641
    # Split result, a tuple of (field values, log entries)
1642
    (job_info, log_entries) = result
1643
    (status, ) = job_info
1644

    
1645
    if log_entries:
1646
      for log_entry in log_entries:
1647
        (serial, timestamp, log_type, message) = log_entry
1648
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1649
                                    log_type, message)
1650
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1651

    
1652
    # TODO: Handle canceled and archived jobs
1653
    elif status in (constants.JOB_STATUS_SUCCESS,
1654
                    constants.JOB_STATUS_ERROR,
1655
                    constants.JOB_STATUS_CANCELING,
1656
                    constants.JOB_STATUS_CANCELED):
1657
      break
1658

    
1659
    prev_job_info = job_info
1660

    
1661
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1662
  if not jobs:
1663
    raise errors.JobLost("Job with id %s lost" % job_id)
1664

    
1665
  status, opstatus, result = jobs[0]
1666

    
1667
  if status == constants.JOB_STATUS_SUCCESS:
1668
    return result
1669

    
1670
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1671
    raise errors.OpExecError("Job was canceled")
1672

    
1673
  has_ok = False
1674
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1675
    if status == constants.OP_STATUS_SUCCESS:
1676
      has_ok = True
1677
    elif status == constants.OP_STATUS_ERROR:
1678
      errors.MaybeRaise(msg)
1679

    
1680
      if has_ok:
1681
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1682
                                 (idx, msg))
1683

    
1684
      raise errors.OpExecError(str(msg))
1685

    
1686
  # default failure mode
1687
  raise errors.OpExecError(result)
1688

    
1689

    
1690
class JobPollCbBase:
1691
  """Base class for L{GenericPollJob} callbacks.
1692

1693
  """
1694
  def __init__(self):
1695
    """Initializes this class.
1696

1697
    """
1698

    
1699
  def WaitForJobChangeOnce(self, job_id, fields,
1700
                           prev_job_info, prev_log_serial):
1701
    """Waits for changes on a job.
1702

1703
    """
1704
    raise NotImplementedError()
1705

    
1706
  def QueryJobs(self, job_ids, fields):
1707
    """Returns the selected fields for the selected job IDs.
1708

1709
    @type job_ids: list of numbers
1710
    @param job_ids: Job IDs
1711
    @type fields: list of strings
1712
    @param fields: Fields
1713

1714
    """
1715
    raise NotImplementedError()
1716

    
1717

    
1718
class JobPollReportCbBase:
1719
  """Base class for L{GenericPollJob} reporting callbacks.
1720

1721
  """
1722
  def __init__(self):
1723
    """Initializes this class.
1724

1725
    """
1726

    
1727
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1728
    """Handles a log message.
1729

1730
    """
1731
    raise NotImplementedError()
1732

    
1733
  def ReportNotChanged(self, job_id, status):
1734
    """Called for if a job hasn't changed in a while.
1735

1736
    @type job_id: number
1737
    @param job_id: Job ID
1738
    @type status: string or None
1739
    @param status: Job status if available
1740

1741
    """
1742
    raise NotImplementedError()
1743

    
1744

    
1745
class _LuxiJobPollCb(JobPollCbBase):
1746
  def __init__(self, cl):
1747
    """Initializes this class.
1748

1749
    """
1750
    JobPollCbBase.__init__(self)
1751
    self.cl = cl
1752

    
1753
  def WaitForJobChangeOnce(self, job_id, fields,
1754
                           prev_job_info, prev_log_serial):
1755
    """Waits for changes on a job.
1756

1757
    """
1758
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1759
                                        prev_job_info, prev_log_serial)
1760

    
1761
  def QueryJobs(self, job_ids, fields):
1762
    """Returns the selected fields for the selected job IDs.
1763

1764
    """
1765
    return self.cl.QueryJobs(job_ids, fields)
1766

    
1767

    
1768
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1769
  def __init__(self, feedback_fn):
1770
    """Initializes this class.
1771

1772
    """
1773
    JobPollReportCbBase.__init__(self)
1774

    
1775
    self.feedback_fn = feedback_fn
1776

    
1777
    assert callable(feedback_fn)
1778

    
1779
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1780
    """Handles a log message.
1781

1782
    """
1783
    self.feedback_fn((timestamp, log_type, log_msg))
1784

    
1785
  def ReportNotChanged(self, job_id, status):
1786
    """Called if a job hasn't changed in a while.
1787

1788
    """
1789
    # Ignore
1790

    
1791

    
1792
class StdioJobPollReportCb(JobPollReportCbBase):
1793
  def __init__(self):
1794
    """Initializes this class.
1795

1796
    """
1797
    JobPollReportCbBase.__init__(self)
1798

    
1799
    self.notified_queued = False
1800
    self.notified_waitlock = False
1801

    
1802
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1803
    """Handles a log message.
1804

1805
    """
1806
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1807
             FormatLogMessage(log_type, log_msg))
1808

    
1809
  def ReportNotChanged(self, job_id, status):
1810
    """Called if a job hasn't changed in a while.
1811

1812
    """
1813
    if status is None:
1814
      return
1815

    
1816
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1817
      ToStderr("Job %s is waiting in queue", job_id)
1818
      self.notified_queued = True
1819

    
1820
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1821
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1822
      self.notified_waitlock = True
1823

    
1824

    
1825
def FormatLogMessage(log_type, log_msg):
1826
  """Formats a job message according to its type.
1827

1828
  """
1829
  if log_type != constants.ELOG_MESSAGE:
1830
    log_msg = str(log_msg)
1831

    
1832
  return utils.SafeEncode(log_msg)
1833

    
1834

    
1835
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1836
  """Function to poll for the result of a job.
1837

1838
  @type job_id: job identifier
1839
  @param job_id: the job to poll for results
1840
  @type cl: luxi.Client
1841
  @param cl: the luxi client to use for communicating with the master;
1842
             if None, a new client will be created
1843

1844
  """
1845
  if cl is None:
1846
    cl = GetClient()
1847

    
1848
  if reporter is None:
1849
    if feedback_fn:
1850
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1851
    else:
1852
      reporter = StdioJobPollReportCb()
1853
  elif feedback_fn:
1854
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1855

    
1856
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
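
# Example (illustrative sketch of the low-level submit-then-poll pattern that
# SubmitOpCode below wraps; "op" stands for any opcode instance and is not
# defined here):
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)  # reports progress via StdioJobPollReportCb
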
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1860
  """Legacy function to submit an opcode.
1861

1862
  This is just a simple wrapper over the construction of the processor
1863
  instance. It should be extended to better handle feedback and
1864
  interaction functions.
1865

1866
  """
1867
  if cl is None:
1868
    cl = GetClient()
1869

    
1870
  SetGenericOpcodeOpts([op], opts)
1871

    
1872
  job_id = SendJob([op], cl=cl)
1873

    
1874
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1875
                       reporter=reporter)
1876

    
1877
  return op_results[0]
1878

    
1879

    
1880
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1881
  """Wrapper around SubmitOpCode or SendJob.
1882

1883
  This function will decide, based on the 'opts' parameter, whether to
1884
  submit and wait for the result of the opcode (and return it), or
1885
  whether to just send the job and print its identifier. It is used in
1886
  order to simplify the implementation of the '--submit' option.
1887

1888
  It will also process the opcodes if we're sending them via SendJob
1889
  (otherwise SubmitOpCode does it).
1890

1891
  """
1892
  if opts and opts.submit_only:
1893
    job = [op]
1894
    SetGenericOpcodeOpts(job, opts)
1895
    job_id = SendJob(job, cl=cl)
1896
    raise JobSubmittedException(job_id)
1897
  else:
1898
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
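
# Example (illustrative sketch of a typical command implementation; the
# opcode and its parameters are hypothetical).  Using SubmitOrSend makes the
# command honour "--submit" transparently:
#
#   def StartupInstance(opts, args):
#     op = opcodes.OpInstanceStartup(instance_name=args[0], force=opts.force)
#     SubmitOrSend(op, opts)
#     return 0
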
def SetGenericOpcodeOpts(opcode_list, options):
1902
  """Processor for generic options.
1903

1904
  This function updates the given opcodes based on generic command
1905
  line options (like debug, dry-run, etc.).
1906

1907
  @param opcode_list: list of opcodes
1908
  @param options: command line options or None
1909
  @return: None (in-place modification)
1910

1911
  """
1912
  if not options:
1913
    return
1914
  for op in opcode_list:
1915
    op.debug_level = options.debug
1916
    if hasattr(options, "dry_run"):
1917
      op.dry_run = options.dry_run
1918
    if getattr(options, "priority", None) is not None:
1919
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1920

    
1921

    
1922
def GetClient():
1923
  # TODO: Cache object?
1924
  try:
1925
    client = luxi.Client()
1926
  except luxi.NoMasterError:
1927
    ss = ssconf.SimpleStore()
1928

    
1929
    # Try to read ssconf file
1930
    try:
1931
      ss.GetMasterNode()
1932
    except errors.ConfigurationError:
1933
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1934
                                 " not part of a cluster")
1935

    
1936
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1937
    if master != myself:
1938
      raise errors.OpPrereqError("This is not the master node, please connect"
1939
                                 " to node '%s' and rerun the command" %
1940
                                 master)
1941
    raise
1942
  return client
1943

    
1944

    
1945
def FormatError(err):
1946
  """Return a formatted error message for a given error.
1947

1948
  This function takes an exception instance and returns a tuple
1949
  consisting of two values: first, the recommended exit code, and
1950
  second, a string describing the error message (not
1951
  newline-terminated).
1952

1953
  """
1954
  retcode = 1
1955
  obuf = StringIO()
1956
  msg = str(err)
1957
  if isinstance(err, errors.ConfigurationError):
1958
    txt = "Corrupt configuration file: %s" % msg
1959
    logging.error(txt)
1960
    obuf.write(txt + "\n")
1961
    obuf.write("Aborting.")
1962
    retcode = 2
1963
  elif isinstance(err, errors.HooksAbort):
1964
    obuf.write("Failure: hooks execution failed:\n")
1965
    for node, script, out in err.args[0]:
1966
      if out:
1967
        obuf.write("  node: %s, script: %s, output: %s\n" %
1968
                   (node, script, out))
1969
      else:
1970
        obuf.write("  node: %s, script: %s (no output)\n" %
1971
                   (node, script))
1972
  elif isinstance(err, errors.HooksFailure):
1973
    obuf.write("Failure: hooks general failure: %s" % msg)
1974
  elif isinstance(err, errors.ResolverError):
1975
    this_host = netutils.Hostname.GetSysName()
1976
    if err.args[0] == this_host:
1977
      msg = "Failure: can't resolve my own hostname ('%s')"
1978
    else:
1979
      msg = "Failure: can't resolve hostname '%s'"
1980
    obuf.write(msg % err.args[0])
1981
  elif isinstance(err, errors.OpPrereqError):
1982
    if len(err.args) == 2:
1983
      obuf.write("Failure: prerequisites not met for this"
1984
               " operation:\nerror type: %s, error details:\n%s" %
1985
                 (err.args[1], err.args[0]))
1986
    else:
1987
      obuf.write("Failure: prerequisites not met for this"
1988
                 " operation:\n%s" % msg)
1989
  elif isinstance(err, errors.OpExecError):
1990
    obuf.write("Failure: command execution error:\n%s" % msg)
1991
  elif isinstance(err, errors.TagError):
1992
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1993
  elif isinstance(err, errors.JobQueueDrainError):
1994
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1995
               " accept new requests\n")
1996
  elif isinstance(err, errors.JobQueueFull):
1997
    obuf.write("Failure: the job queue is full and doesn't accept new"
1998
               " job submissions until old jobs are archived\n")
1999
  elif isinstance(err, errors.TypeEnforcementError):
2000
    obuf.write("Parameter Error: %s" % msg)
2001
  elif isinstance(err, errors.ParameterError):
2002
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2003
  elif isinstance(err, luxi.NoMasterError):
2004
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
2005
               " and listening for connections?")
2006
  elif isinstance(err, luxi.TimeoutError):
2007
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2008
               " been submitted and will continue to run even if the call"
2009
               " timed out. Useful commands in this situation are \"gnt-job"
2010
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2011
    obuf.write(msg)
2012
  elif isinstance(err, luxi.PermissionError):
2013
    obuf.write("It seems you don't have permissions to connect to the"
2014
               " master daemon.\nPlease retry as a different user.")
2015
  elif isinstance(err, luxi.ProtocolError):
2016
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2017
               "%s" % msg)
2018
  elif isinstance(err, errors.JobLost):
2019
    obuf.write("Error checking job status: %s" % msg)
2020
  elif isinstance(err, errors.QueryFilterParseError):
2021
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2022
    obuf.write("\n".join(err.GetDetails()))
2023
  elif isinstance(err, errors.GenericError):
2024
    obuf.write("Unhandled Ganeti error: %s" % msg)
2025
  elif isinstance(err, JobSubmittedException):
2026
    obuf.write("JobID: %s\n" % err.args[0])
2027
    retcode = 0
2028
  else:
2029
    obuf.write("Unhandled exception: %s" % msg)
2030
  return retcode, obuf.getvalue().rstrip("\n")
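
# Example (illustrative sketch of the caller-side pattern, similar to what
# GenericMain below does):
#
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     retcode, msg = FormatError(err)
#     ToStderr(msg)
#     sys.exit(retcode)
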
def GenericMain(commands, override=None, aliases=None,
2034
                env_override=frozenset()):
2035
  """Generic main function for all the gnt-* commands.
2036

2037
  @param commands: a dictionary with a special structure, see the design doc
2038
                   for command line handling.
2039
  @param override: if not None, we expect a dictionary with keys that will
2040
                   override command line options; this can be used to pass
2041
                   options from the scripts to generic functions
2042
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2043
  @param env_override: list of environment names which are allowed to submit
2044
                       default args for commands
2045

2046
  """
2047
  # save the program name and the entire command line for later logging
2048
  if sys.argv:
2049
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
2050
    if len(sys.argv) >= 2:
2051
      binary += " " + sys.argv[1]
2052
      old_cmdline = " ".join(sys.argv[2:])
2053
    else:
2054
      old_cmdline = ""
2055
  else:
2056
    binary = "<unknown program>"
2057
    old_cmdline = ""
2058

    
2059
  if aliases is None:
2060
    aliases = {}
2061

    
2062
  try:
2063
    func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
2064
  except errors.ParameterError, err:
2065
    result, err_msg = FormatError(err)
2066
    ToStderr(err_msg)
2067
    return 1
2068

    
2069
  if func is None: # parse error
2070
    return 1
2071

    
2072
  if override is not None:
2073
    for key, val in override.iteritems():
2074
      setattr(options, key, val)
2075

    
2076
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
2077
                     stderr_logging=True)
2078

    
2079
  if old_cmdline:
2080
    logging.info("run with arguments '%s'", old_cmdline)
2081
  else:
2082
    logging.info("run with no arguments")
2083

    
2084
  try:
2085
    result = func(options, args)
2086
  except (errors.GenericError, luxi.ProtocolError,
2087
          JobSubmittedException), err:
2088
    result, err_msg = FormatError(err)
2089
    logging.exception("Error during command processing")
2090
    ToStderr(err_msg)
2091
  except KeyboardInterrupt:
2092
    result = constants.EXIT_FAILURE
2093
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2094
             " might have been submitted and"
2095
             " will continue to run in the background.")
2096
  except IOError, err:
2097
    if err.errno == errno.EPIPE:
2098
      # our terminal went away, we'll exit
2099
      sys.exit(constants.EXIT_FAILURE)
2100
    else:
2101
      raise
2102

    
2103
  return result
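
# Example (illustrative sketch of a gnt-* client script; the command table
# entry shown here is simplified and its argument/option values are
# hypothetical).  Each entry is (function, argument definitions, options,
# usage, description), as unpacked by _ParseArgs above:
#
#   commands = {
#     "info": (ShowNodeInfo, [ArgNode(min=0)], [], "[<node>...]",
#              "Show information about one or more nodes"),
#     }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))
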
def ParseNicOption(optvalue):
2107
  """Parses the value of the --net option(s).
2108

2109
  """
2110
  try:
2111
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2112
  except (TypeError, ValueError), err:
2113
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2114

    
2115
  nics = [{}] * nic_max
2116
  for nidx, ndict in optvalue:
2117
    nidx = int(nidx)
2118

    
2119
    if not isinstance(ndict, dict):
2120
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2121
                                 " got %s" % (nidx, ndict))
2122

    
2123
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2124

    
2125
    nics[nidx] = ndict
2126

    
2127
  return nics
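
# Example (illustrative sketch; the option values arrive as (index, dict)
# pairs, e.g. from "--net 0:link=br0 --net 1:ip=198.51.100.10"; all values
# are made up):
#
#   >>> ParseNicOption([("0", {"link": "br0"}), ("1", {"ip": "198.51.100.10"})])
#   [{'link': 'br0'}, {'ip': '198.51.100.10'}]
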
def GenericInstanceCreate(mode, opts, args):
2131
  """Add an instance to the cluster via either creation or import.
2132

2133
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2134
  @param opts: the command line options selected by the user
2135
  @type args: list
2136
  @param args: should contain only one element, the new instance name
2137
  @rtype: int
2138
  @return: the desired exit code
2139

2140
  """
2141
  instance = args[0]
2142

    
2143
  (pnode, snode) = SplitNodeOption(opts.node)
2144

    
2145
  hypervisor = None
2146
  hvparams = {}
2147
  if opts.hypervisor:
2148
    hypervisor, hvparams = opts.hypervisor
2149

    
2150
  if opts.nics:
2151
    nics = ParseNicOption(opts.nics)
2152
  elif opts.no_nics:
2153
    # no nics
2154
    nics = []
2155
  elif mode == constants.INSTANCE_CREATE:
2156
    # default of one nic, all auto
2157
    nics = [{}]
2158
  else:
2159
    # mode == import
2160
    nics = []
2161

    
2162
  if opts.disk_template == constants.DT_DISKLESS:
2163
    if opts.disks or opts.sd_size is not None:
2164
      raise errors.OpPrereqError("Diskless instance but disk"
2165
                                 " information passed")
2166
    disks = []
2167
  else:
2168
    if (not opts.disks and not opts.sd_size
2169
        and mode == constants.INSTANCE_CREATE):
2170
      raise errors.OpPrereqError("No disk information specified")
2171
    if opts.disks and opts.sd_size is not None:
2172
      raise errors.OpPrereqError("Please use either the '--disk' or"
2173
                                 " '-s' option")
2174
    if opts.sd_size is not None:
2175
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2176

    
2177
    if opts.disks:
2178
      try:
2179
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2180
      except ValueError, err:
2181
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2182
      disks = [{}] * disk_max
2183
    else:
2184
      disks = []
2185
    for didx, ddict in opts.disks:
2186
      didx = int(didx)
2187
      if not isinstance(ddict, dict):
2188
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2189
        raise errors.OpPrereqError(msg)
2190
      elif constants.IDISK_SIZE in ddict:
2191
        if constants.IDISK_ADOPT in ddict:
2192
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2193
                                     " (disk %d)" % didx)
2194
        try:
2195
          ddict[constants.IDISK_SIZE] = \
2196
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2197
        except ValueError, err:
2198
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2199
                                     (didx, err))
2200
      elif constants.IDISK_ADOPT in ddict:
2201
        if mode == constants.INSTANCE_IMPORT:
2202
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2203
                                     " import")
2204
        ddict[constants.IDISK_SIZE] = 0
2205
      else:
2206
        raise errors.OpPrereqError("Missing size or adoption source for"
2207
                                   " disk %d" % didx)
2208
      disks[didx] = ddict
2209

    
2210
  if opts.tags is not None:
2211
    tags = opts.tags.split(",")
2212
  else:
2213
    tags = []
2214

    
2215
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2216
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2217

    
2218
  if mode == constants.INSTANCE_CREATE:
2219
    start = opts.start
2220
    os_type = opts.os
2221
    force_variant = opts.force_variant
2222
    src_node = None
2223
    src_path = None
2224
    no_install = opts.no_install
2225
    identify_defaults = False
2226
  elif mode == constants.INSTANCE_IMPORT:
2227
    start = False
2228
    os_type = None
2229
    force_variant = False
2230
    src_node = opts.src_node
2231
    src_path = opts.src_dir
2232
    no_install = None
2233
    identify_defaults = opts.identify_defaults
2234
  else:
2235
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2236

    
2237
  op = opcodes.OpInstanceCreate(instance_name=instance,
2238
                                disks=disks,
2239
                                disk_template=opts.disk_template,
2240
                                nics=nics,
2241
                                pnode=pnode, snode=snode,
2242
                                ip_check=opts.ip_check,
2243
                                name_check=opts.name_check,
2244
                                wait_for_sync=opts.wait_for_sync,
2245
                                file_storage_dir=opts.file_storage_dir,
2246
                                file_driver=opts.file_driver,
2247
                                iallocator=opts.iallocator,
2248
                                hypervisor=hypervisor,
2249
                                hvparams=hvparams,
2250
                                beparams=opts.beparams,
2251
                                osparams=opts.osparams,
2252
                                mode=mode,
2253
                                start=start,
2254
                                os_type=os_type,
2255
                                force_variant=force_variant,
2256
                                src_node=src_node,
2257
                                src_path=src_path,
2258
                                tags=tags,
2259
                                no_install=no_install,
2260
                                identify_defaults=identify_defaults)
2261

    
2262
  SubmitOrSend(op, opts)
2263
  return 0
2264

    
2265

    
2266
class _RunWhileClusterStoppedHelper:
2267
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2268

2269
  """
2270
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2271
    """Initializes this class.
2272

2273
    @type feedback_fn: callable
2274
    @param feedback_fn: Feedback function
2275
    @type cluster_name: string
2276
    @param cluster_name: Cluster name
2277
    @type master_node: string
2278
    @param master_node: Master node name
2279
    @type online_nodes: list
2280
    @param online_nodes: List of names of online nodes
2281

2282
    """
2283
    self.feedback_fn = feedback_fn
2284
    self.cluster_name = cluster_name
2285
    self.master_node = master_node
2286
    self.online_nodes = online_nodes
2287

    
2288
    self.ssh = ssh.SshRunner(self.cluster_name)
2289

    
2290
    self.nonmaster_nodes = [name for name in online_nodes
2291
                            if name != master_node]
2292

    
2293
    assert self.master_node not in self.nonmaster_nodes
2294

    
2295
  def _RunCmd(self, node_name, cmd):
2296
    """Runs a command on the local or a remote machine.
2297

2298
    @type node_name: string
2299
    @param node_name: Machine name
2300
    @type cmd: list
2301
    @param cmd: Command
2302

2303
    """
2304
    if node_name is None or node_name == self.master_node:
2305
      # No need to use SSH
2306
      result = utils.RunCmd(cmd)
2307
    else:
2308
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2309

    
2310
    if result.failed:
2311
      errmsg = ["Failed to run command %s" % result.cmd]
2312
      if node_name:
2313
        errmsg.append("on node %s" % node_name)
2314
      errmsg.append(": exitcode %s and error %s" %
2315
                    (result.exit_code, result.output))
2316
      raise errors.OpExecError(" ".join(errmsg))
2317

    
2318
  def Call(self, fn, *args):
2319
    """Call function while all daemons are stopped.
2320

2321
    @type fn: callable
2322
    @param fn: Function to be called
2323

2324
    """
2325
    # Pause watcher by acquiring an exclusive lock on watcher state file
2326
    self.feedback_fn("Blocking watcher")
2327
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2328
    try:
2329
      # TODO: Currently, this just blocks. There's no timeout.
2330
      # TODO: Should it be a shared lock?
2331
      watcher_block.Exclusive(blocking=True)
2332

    
2333
      # Stop master daemons, so that no new jobs can come in and all running
2334
      # ones are finished
2335
      self.feedback_fn("Stopping master daemons")
2336
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2337
      try:
2338
        # Stop daemons on all nodes
2339
        for node_name in self.online_nodes:
2340
          self.feedback_fn("Stopping daemons on %s" % node_name)
2341
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2342

    
2343
        # All daemons are shut down now
2344
        try:
2345
          return fn(self, *args)
2346
        except Exception, err:
2347
          _, errmsg = FormatError(err)
2348
          logging.exception("Caught exception")
2349
          self.feedback_fn(errmsg)
2350
          raise
2351
      finally:
2352
        # Start cluster again, master node last
2353
        for node_name in self.nonmaster_nodes + [self.master_node]:
2354
          self.feedback_fn("Starting daemons on %s" % node_name)
2355
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2356
    finally:
2357
      # Resume watcher
2358
      watcher_block.Close()
2359

    
2360

    
2361
def RunWhileClusterStopped(feedback_fn, fn, *args):
2362
  """Calls a function while all cluster daemons are stopped.
2363

2364
  @type feedback_fn: callable
2365
  @param feedback_fn: Feedback function
2366
  @type fn: callable
2367
  @param fn: Function to be called when daemons are stopped
2368

2369
  """
2370
  feedback_fn("Gathering cluster information")
2371

    
2372
  # This ensures we're running on the master daemon
2373
  cl = GetClient()
2374

    
2375
  (cluster_name, master_node) = \
2376
    cl.QueryConfigValues(["cluster_name", "master_node"])
2377

    
2378
  online_nodes = GetOnlineNodes([], cl=cl)
2379

    
2380
  # Don't keep a reference to the client. The master daemon will go away.
2381
  del cl
2382

    
2383
  assert master_node in online_nodes
2384

    
2385
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2386
                                       online_nodes).Call(fn, *args)
2387

    
2388

    
2389
def GenerateTable(headers, fields, separator, data,
2390
                  numfields=None, unitfields=None,
2391
                  units=None):
2392
  """Prints a table with headers and different fields.
2393

2394
  @type headers: dict
2395
  @param headers: dictionary mapping field names to headers for
2396
      the table
2397
  @type fields: list
2398
  @param fields: the field names corresponding to each row in
2399
      the data field
2400
  @param separator: the separator to be used; if this is None,
2401
      the default 'smart' algorithm is used which computes optimal
2402
      field width, otherwise just the separator is used between
2403
      each field
2404
  @type data: list
2405
  @param data: a list of lists, each sublist being one row to be output
2406
  @type numfields: list
2407
  @param numfields: a list with the fields that hold numeric
2408
      values and thus should be right-aligned
2409
  @type unitfields: list
2410
  @param unitfields: a list with the fields that hold numeric
2411
      values that should be formatted with the units field
2412
  @type units: string or None
2413
  @param units: the units we should use for formatting, or None for
2414
      automatic choice (human-readable for non-separator usage, otherwise
2415
      megabytes); this is a one-letter string
2416

2417
  """
2418
  if units is None:
2419
    if separator:
2420
      units = "m"
2421
    else:
2422
      units = "h"
2423

    
2424
  if numfields is None:
2425
    numfields = []
2426
  if unitfields is None:
2427
    unitfields = []
2428

    
2429
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2430
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2431

    
2432
  format_fields = []
2433
  for field in fields:
2434
    if headers and field not in headers:
2435
      # TODO: handle better unknown fields (either revert to old
2436
      # style of raising exception, or deal more intelligently with
2437
      # variable fields)
2438
      headers[field] = field
2439
    if separator is not None:
2440
      format_fields.append("%s")
2441
    elif numfields.Matches(field):
2442
      format_fields.append("%*s")
2443
    else:
2444
      format_fields.append("%-*s")
2445

    
2446
  if separator is None:
2447
    mlens = [0 for name in fields]
2448
    format_str = " ".join(format_fields)
2449
  else:
2450
    format_str = separator.replace("%", "%%").join(format_fields)
2451

    
2452
  for row in data:
2453
    if row is None:
2454
      continue
2455
    for idx, val in enumerate(row):
2456
      if unitfields.Matches(fields[idx]):
2457
        try:
2458
          val = int(val)
2459
        except (TypeError, ValueError):
2460
          pass
2461
        else:
2462
          val = row[idx] = utils.FormatUnit(val, units)
2463
      val = row[idx] = str(val)
2464
      if separator is None:
2465
        mlens[idx] = max(mlens[idx], len(val))
2466

    
2467
  result = []
2468
  if headers:
2469
    args = []
2470
    for idx, name in enumerate(fields):
2471
      hdr = headers[name]
2472
      if separator is None:
2473
        mlens[idx] = max(mlens[idx], len(hdr))
2474
        args.append(mlens[idx])
2475
      args.append(hdr)
2476
    result.append(format_str % tuple(args))
2477

    
2478
  if separator is None:
2479
    assert len(mlens) == len(fields)
2480

    
2481
    if fields and not numfields.Matches(fields[-1]):
2482
      mlens[-1] = 0
2483

    
2484
  for line in data:
2485
    args = []
2486
    if line is None:
2487
      line = ["-" for _ in fields]
2488
    for idx in range(len(fields)):
2489
      if separator is None:
2490
        args.append(mlens[idx])
2491
      args.append(line[idx])
2492
    result.append(format_str % tuple(args))
2493

    
2494
  return result
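
# Example (illustrative sketch with made-up headers, fields and data; with
# separator=None the "smart" width computation is used and unit fields are
# run through utils.FormatUnit):
#
#   lines = GenerateTable({"name": "Node", "dfree": "DiskFree"},
#                         ["name", "dfree"], None,
#                         [["node1.example.com", 102400]],
#                         numfields=["dfree"], unitfields=["dfree"])
#   for line in lines:
#     ToStdout(line)
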
def _FormatBool(value):
2498
  """Formats a boolean value as a string.
2499

2500
  """
2501
  if value:
2502
    return "Y"
2503
  return "N"
2504

    
2505

    
2506
#: Default formatting for query results; (callback, align right)
2507
_DEFAULT_FORMAT_QUERY = {
2508
  constants.QFT_TEXT: (str, False),
2509
  constants.QFT_BOOL: (_FormatBool, False),
2510
  constants.QFT_NUMBER: (str, True),
2511
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2512
  constants.QFT_OTHER: (str, False),
2513
  constants.QFT_UNKNOWN: (str, False),
2514
  }
2515

    
2516

    
2517
def _GetColumnFormatter(fdef, override, unit):
2518
  """Returns formatting function for a field.
2519

2520
  @type fdef: L{objects.QueryFieldDefinition}
2521
  @type override: dict
2522
  @param override: Dictionary for overriding field formatting functions,
2523
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2524
  @type unit: string
2525
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2526
  @rtype: tuple; (callable, bool)
2527
  @return: Returns the function to format a value (takes one parameter) and a
2528
    boolean for aligning the value on the right-hand side
2529

2530
  """
2531
  fmt = override.get(fdef.name, None)
2532
  if fmt is not None:
2533
    return fmt
2534

    
2535
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2536

    
2537
  if fdef.kind == constants.QFT_UNIT:
2538
    # Can't keep this information in the static dictionary
2539
    return (lambda value: utils.FormatUnit(value, unit), True)
2540

    
2541
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2542
  if fmt is not None:
2543
    return fmt
2544

    
2545
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2546

    
2547

    
2548
class _QueryColumnFormatter:
2549
  """Callable class for formatting fields of a query.
2550

2551
  """
2552
  def __init__(self, fn, status_fn, verbose):
2553
    """Initializes this class.
2554

2555
    @type fn: callable
2556
    @param fn: Formatting function
2557
    @type status_fn: callable
2558
    @param status_fn: Function to report fields' status
2559
    @type verbose: boolean
2560
    @param verbose: whether to use verbose field descriptions or not
2561

2562
    """
2563
    self._fn = fn
2564
    self._status_fn = status_fn
2565
    self._verbose = verbose
2566

    
2567
  def __call__(self, data):
2568
    """Returns a field's string representation.
2569

2570
    """
2571
    (status, value) = data
2572

    
2573
    # Report status
2574
    self._status_fn(status)
2575

    
2576
    if status == constants.RS_NORMAL:
2577
      return self._fn(value)
2578

    
2579
    assert value is None, \
2580
           "Found value %r for abnormal status %s" % (value, status)
2581

    
2582
    return FormatResultError(status, self._verbose)
2583

    
2584

    
2585
def FormatResultError(status, verbose):
2586
  """Formats result status other than L{constants.RS_NORMAL}.
2587

2588
  @param status: The result status
2589
  @type verbose: boolean
2590
  @param verbose: Whether to return the verbose text
2591
  @return: Text of result status
2592

2593
  """
2594
  assert status != constants.RS_NORMAL, \
2595
         "FormatResultError called with status equal to constants.RS_NORMAL"
2596
  try:
2597
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2598
  except KeyError:
2599
    raise NotImplementedError("Unknown status %s" % status)
2600
  else:
2601
    if verbose:
2602
      return verbose_text
2603
    return normal_text
2604

    
2605

    
2606
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2607
                      header=False, verbose=False):
2608
  """Formats data in L{objects.QueryResponse}.
2609

2610
  @type result: L{objects.QueryResponse}
2611
  @param result: result of query operation
2612
  @type unit: string
2613
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2614
    see L{utils.text.FormatUnit}
2615
  @type format_override: dict
2616
  @param format_override: Dictionary for overriding field formatting functions,
2617
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2618
  @type separator: string or None
2619
  @param separator: String used to separate fields
2620
  @type header: bool
2621
  @param header: Whether to output header row
2622
  @type verbose: boolean
2623
  @param verbose: whether to use verbose field descriptions or not
2624

2625
  """
2626
  if unit is None:
2627
    if separator:
2628
      unit = "m"
2629
    else:
2630
      unit = "h"
2631

    
2632
  if format_override is None:
2633
    format_override = {}
2634

    
2635
  stats = dict.fromkeys(constants.RS_ALL, 0)
2636

    
2637
  def _RecordStatus(status):
2638
    if status in stats:
2639
      stats[status] += 1
2640

    
2641
  columns = []
2642
  for fdef in result.fields:
2643
    assert fdef.title and fdef.name
2644
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2645
    columns.append(TableColumn(fdef.title,
2646
                               _QueryColumnFormatter(fn, _RecordStatus,
2647
                                                     verbose),
2648
                               align_right))
2649

    
2650
  table = FormatTable(result.data, columns, header, separator)
2651

    
2652
  # Collect statistics
2653
  assert len(stats) == len(constants.RS_ALL)
2654
  assert compat.all(count >= 0 for count in stats.values())
2655

    
2656
  # Determine overall status. If there was no data, unknown fields must be
2657
  # detected via the field definitions.
2658
  if (stats[constants.RS_UNKNOWN] or
2659
      (not result.data and _GetUnknownFields(result.fields))):
2660
    status = QR_UNKNOWN
2661
  elif compat.any(count > 0 for key, count in stats.items()
2662
                  if key != constants.RS_NORMAL):
2663
    status = QR_INCOMPLETE
2664
  else:
2665
    status = QR_NORMAL
2666

    
2667
  return (status, table)
2668

    
2669

    
2670
def _GetUnknownFields(fdefs):
2671
  """Returns list of unknown fields included in C{fdefs}.
2672

2673
  @type fdefs: list of L{objects.QueryFieldDefinition}
2674

2675
  """
2676
  return [fdef for fdef in fdefs
2677
          if fdef.kind == constants.QFT_UNKNOWN]
2678

    
2679

    
2680
def _WarnUnknownFields(fdefs):
2681
  """Prints a warning to stderr if a query included unknown fields.
2682

2683
  @type fdefs: list of L{objects.QueryFieldDefinition}
2684

2685
  """
2686
  unknown = _GetUnknownFields(fdefs)
2687
  if unknown:
2688
    ToStderr("Warning: Queried for unknown fields %s",
2689
             utils.CommaJoin(fdef.name for fdef in unknown))
2690
    return True
2691

    
2692
  return False
2693

    
2694

    
2695
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2696
                format_override=None, verbose=False, force_filter=False):
2697
  """Generic implementation for listing all items of a resource.
2698

2699
  @param resource: One of L{constants.QR_VIA_LUXI}
2700
  @type fields: list of strings
2701
  @param fields: List of fields to query for
2702
  @type names: list of strings
2703
  @param names: Names of items to query for
2704
  @type unit: string or None
2705
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2706
    None for automatic choice (human-readable for non-separator usage,
2707
    otherwise megabytes); this is a one-letter string
2708
  @type separator: string or None
2709
  @param separator: String used to separate fields
2710
  @type header: bool
2711
  @param header: Whether to show header row
2712
  @type force_filter: bool
2713
  @param force_filter: Whether to always treat names as filter
2714
  @type format_override: dict
2715
  @param format_override: Dictionary for overriding field formatting functions,
2716
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2717
  @type verbose: boolean
2718
  @param verbose: whether to use verbose field descriptions or not
2719

2720
  """
2721
  if not names:
2722
    names = None
2723

    
2724
  qfilter = qlang.MakeFilter(names, force_filter)
2725

    
2726
  if cl is None:
2727
    cl = GetClient()
2728

    
2729
  response = cl.Query(resource, fields, qfilter)
2730

    
2731
  found_unknown = _WarnUnknownFields(response.fields)
2732

    
2733
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2734
                                     header=header,
2735
                                     format_override=format_override,
2736
                                     verbose=verbose)
2737

    
2738
  for line in data:
2739
    ToStdout(line)
2740

    
2741
  assert ((found_unknown and status == QR_UNKNOWN) or
2742
          (not found_unknown and status != QR_UNKNOWN))
2743

    
2744
  if status == QR_UNKNOWN:
2745
    return constants.EXIT_UNKNOWN_FIELD
2746

    
2747
  # TODO: Should the list command fail if not all data could be collected?
2748
  return constants.EXIT_SUCCESS
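
# Example (illustrative sketch, roughly what a "gnt-* list" command does; the
# field names and option attribute names are only examples):
#
#   return GenericList(constants.QR_NODE, ["name", "pinst_cnt", "dfree"],
#                      opts.nodes, opts.units, opts.separator,
#                      not opts.no_headers, verbose=opts.verbose,
#                      force_filter=opts.force_filter)
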
def GenericListFields(resource, fields, separator, header, cl=None):
2752
  """Generic implementation for listing fields for a resource.
2753

2754
  @param resource: One of L{constants.QR_VIA_LUXI}
2755
  @type fields: list of strings
2756
  @param fields: List of fields to query for
2757
  @type separator: string or None
2758
  @param separator: String used to separate fields
2759
  @type header: bool
2760
  @param header: Whether to show header row
2761

2762
  """
2763
  if cl is None:
2764
    cl = GetClient()
2765

    
2766
  if not fields:
2767
    fields = None
2768

    
2769
  response = cl.QueryFields(resource, fields)
2770

    
2771
  found_unknown = _WarnUnknownFields(response.fields)
2772

    
2773
  columns = [
2774
    TableColumn("Name", str, False),
2775
    TableColumn("Title", str, False),
2776
    TableColumn("Description", str, False),
2777
    ]
2778

    
2779
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2780

    
2781
  for line in FormatTable(rows, columns, header, separator):
2782
    ToStdout(line)
2783

    
2784
  if found_unknown:
2785
    return constants.EXIT_UNKNOWN_FIELD
2786

    
2787
  return constants.EXIT_SUCCESS
2788

    
2789

    
2790
class TableColumn:
2791
  """Describes a column for L{FormatTable}.
2792

2793
  """
2794
  def __init__(self, title, fn, align_right):
2795
    """Initializes this class.
2796

2797
    @type title: string
2798
    @param title: Column title
2799
    @type fn: callable
2800
    @param fn: Formatting function
2801
    @type align_right: bool
2802
    @param align_right: Whether to align values on the right-hand side
2803

2804
    """
2805
    self.title = title
2806
    self.format = fn
2807
    self.align_right = align_right
2808

    
2809

    
2810
def _GetColFormatString(width, align_right):
2811
  """Returns the format string for a field.
2812

2813
  """
2814
  if align_right:
2815
    sign = ""
2816
  else:
2817
    sign = "-"
2818

    
2819
  return "%%%s%ss" % (sign, width)
2820

    
2821

    
2822
def FormatTable(rows, columns, header, separator):
2823
  """Formats data as a table.
2824

2825
  @type rows: list of lists
2826
  @param rows: Row data, one list per row
2827
  @type columns: list of L{TableColumn}
2828
  @param columns: Column descriptions
2829
  @type header: bool
2830
  @param header: Whether to show header row
2831
  @type separator: string or None
2832
  @param separator: String used to separate columns
2833

2834
  """
2835
  if header:
2836
    data = [[col.title for col in columns]]
2837
    colwidth = [len(col.title) for col in columns]
2838
  else:
2839
    data = []
2840
    colwidth = [0 for _ in columns]
2841

    
2842
  # Format row data
2843
  for row in rows:
2844
    assert len(row) == len(columns)
2845

    
2846
    formatted = [col.format(value) for value, col in zip(row, columns)]
2847

    
2848
    if separator is None:
2849
      # Update column widths
2850
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2851
        # Modifying a list's items while iterating is fine
2852
        colwidth[idx] = max(oldwidth, len(value))
2853

    
2854
    data.append(formatted)
2855

    
2856
  if separator is not None:
2857
    # Return early if a separator is used
2858
    return [separator.join(row) for row in data]
2859

    
2860
  if columns and not columns[-1].align_right:
2861
    # Avoid unnecessary spaces at end of line
2862
    colwidth[-1] = 0
2863

    
2864
  # Build format string
2865
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2866
                  for col, width in zip(columns, colwidth)])
2867

    
2868
  return [fmt % tuple(row) for row in data]
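
# Example (illustrative sketch with made-up data; the second column is
# right-aligned and formatted as a human-readable unit):
#
#   columns = [TableColumn("Name", str, False),
#              TableColumn("Memory", lambda v: utils.FormatUnit(v, "h"), True)]
#   for line in FormatTable([["node1.example.com", 4096]], columns, True, None):
#     ToStdout(line)
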
def FormatTimestamp(ts):
2872
  """Formats a given timestamp.
2873

2874
  @type ts: timestamp
2875
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2876

2877
  @rtype: string
2878
  @return: a string with the formatted timestamp
2879

2880
  """
2881
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2882
    return "?"
2883
  sec, usec = ts
2884
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2885

    
2886

    
2887
def ParseTimespec(value):
2888
  """Parse a time specification.
2889

2890
  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks
2897

2898
  Without any suffix, the value will be taken to be in seconds.
2899

2900
  """
2901
  value = str(value)
2902
  if not value:
2903
    raise errors.OpPrereqError("Empty time specification passed")
2904
  suffix_map = {
2905
    "s": 1,
2906
    "m": 60,
2907
    "h": 3600,
2908
    "d": 86400,
2909
    "w": 604800,
2910
    }
2911
  if value[-1] not in suffix_map:
2912
    try:
2913
      value = int(value)
2914
    except (TypeError, ValueError):
2915
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2916
  else:
2917
    multiplier = suffix_map[value[-1]]
2918
    value = value[:-1]
2919
    if not value: # no data left after stripping the suffix
2920
      raise errors.OpPrereqError("Invalid time specification (only"
2921
                                 " suffix passed)")
2922
    try:
2923
      value = int(value) * multiplier
2924
    except (TypeError, ValueError):
2925
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2926
  return value
2927

    
2928

    
2929
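# Illustrative usage of ParseTimespec (a sketch, not part of the original
# module); the result is always a number of seconds:
#
#   ParseTimespec("30")   # -> 30 (no suffix, taken as seconds)
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("1w")   # -> 604800
#   ParseTimespec("h")    # raises errors.OpPrereqError (suffix without value)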
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


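# Illustrative usage of GetOnlineNodes (a sketch, not part of the original
# module; the node names are hypothetical):
#
#   cl = GetClient()
#   # Expand a user-supplied list, dropping offline nodes (a note is printed
#   # on stderr unless nowarn=True)
#   names = GetOnlineNodes(["node1.example.com", "node2.example.com"], cl=cl)
#   # All secondary IPs except the master's, e.g. for replication traffic
#   sips = GetOnlineNodes([], cl=cl, secondary_ips=True, filter_master=True)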
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


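# Illustrative usage of ToStdout/ToStderr (a sketch, not part of the original
# module): arguments are interpolated printf-style only when present.
#
#   ToStdout("Instance %s started", "inst1.example.com")
#   ToStderr("Warning: %d node(s) offline", 2)
#   ToStdout("No args given, so a literal %s would be printed as-is")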
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


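# Illustrative usage of JobExecutor (a sketch, not part of the original module;
# the opcode class and instance names are assumptions):
#
#   je = JobExecutor(cl=GetClient(), opts=opts)
#   for name in ["inst1.example.com", "inst2.example.com"]:
#     je.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
#   results = je.GetResults()  # [(success, result_or_error), ...], queue order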
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the object's own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))


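# Illustrative usage of FormatParameterDict (a sketch, not part of the original
# module): keys missing from the own dict are shown as coming from defaults.
#
#   buf = StringIO()
#   FormatParameterDict(buf, {"vcpus": 2}, {"vcpus": 2, "memory": 128})
#   # buf.getvalue() == "  - memory: default (128)\n  - vcpus: 2\n"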
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
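# Illustrative usage of ConfirmOperation (a sketch, not part of the original
# module; the node names and operation text are hypothetical):
#
#   if not ConfirmOperation(["node1.example.com", "node2.example.com"],
#                           "nodes", "shutdown"):
#     return constants.EXIT_FAILURE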