root / lib / cli.py @ ef9fa5b9

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
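
# Illustrative sketch only: each entry of a client's command table pairs one
# of the ARGS_* definitions above with the command implementation, so that
# _CheckArguments() (below) can validate the positional arguments.  A
# hypothetical entry (ShowInstanceConfig is an invented name) could look like:
#
#   "info": (ShowInstanceConfig, ARGS_MANY_INSTANCES, [STATIC_OPT],
#            "[-s] [<instance>...]", "Show information on instances"),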


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
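
# Illustrative behaviour of _ExtractTagsObject (example values only):
#   opts.tag_type == constants.TAG_CLUSTER, args == []
#     => returns (constants.TAG_CLUSTER, constants.TAG_CLUSTER)
#   opts.tag_type == constants.TAG_NODE, args == ["node1.example.com", "mytag"]
#     => returns (constants.TAG_NODE, "node1.example.com") and leaves
#        args == ["mytag"], i.e. the object name is popped off the front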


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
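
# For illustration (assuming utils.ParseUnit's usual MiB-based semantics, e.g.
# "512" -> 512 and "4G" -> 4096), optparse runs this converter whenever an
# option is declared with type="unit", as OS_SIZE_OPT does further down.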


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
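
# Example of the conversion performed by _SplitKeyVal (values stay strings,
# "no_"-prefixed keys become False, "-"-prefixed keys become None and bare
# keys become True); "-H" below is only used for error messages:
#
#   _SplitKeyVal("-H", "kernel_path=/vmlinuz,no_acpi,-initrd_path,serial_console")
#     => {"kernel_path": "/vmlinuz", "acpi": False, "initrd_path": None,
#         "serial_console": True}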


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
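
# Example of check_ident_key_val's parsing (the leading identifier is kept
# separate from the key/value part, which is handled by _SplitKeyVal):
#
#   check_ident_key_val(None, "-H", "kvm:kernel_path=/vmlinuz,no_acpi")
#     => ("kvm", {"kernel_path": "/vmlinuz", "acpi": False})
#   check_ident_key_val(None, "-H", "no_kvm")
#     => ("kvm", False)   (the "no_" form removes a whole parameter group)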


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)

# completion_suggest is normally a list. For dynamic completion, numeric
# values that do not evaluate to False are used instead.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                           default=None,
                           help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                           default=None,
                           help="File containing the certificate of the CA"
                                " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                               dest="new_spice_cert", default=None,
                               action="store_true",
                               help=("Generate a new self-signed SPICE"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of env variables allowed for default args

  """
  assert not (env_override - set(commands))

  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1387

    
1388

    
1389
def _CheckArguments(cmd, args_def, args):
1390
  """Verifies the arguments using the argument definition.
1391

1392
  Algorithm:
1393

1394
    1. Abort with error if values specified by user but none expected.
1395

1396
    1. For each argument in definition
1397

1398
      1. Keep running count of minimum number of values (min_count)
1399
      1. Keep running count of maximum number of values (max_count)
1400
      1. If it has an unlimited number of values
1401

1402
        1. Abort with error if it's not the last argument in the definition
1403

1404
    1. If last argument has limited number of values
1405

1406
      1. Abort with error if number of values doesn't match or is too large
1407

1408
    1. Abort with error if user didn't pass enough values (min_count)
1409

1410
  """
1411
  if args and not args_def:
1412
    ToStderr("Error: Command %s expects no arguments", cmd)
1413
    return False
1414

    
1415
  min_count = None
1416
  max_count = None
1417
  check_max = None
1418

    
1419
  last_idx = len(args_def) - 1
1420

    
1421
  for idx, arg in enumerate(args_def):
1422
    if min_count is None:
1423
      min_count = arg.min
1424
    elif arg.min is not None:
1425
      min_count += arg.min
1426

    
1427
    if max_count is None:
1428
      max_count = arg.max
1429
    elif arg.max is not None:
1430
      max_count += arg.max
1431

    
1432
    if idx == last_idx:
1433
      check_max = (arg.max is not None)
1434

    
1435
    elif arg.max is None:
1436
      raise errors.ProgrammerError("Only the last argument can have max=None")
1437

    
1438
  if check_max:
1439
    # Command with exact number of arguments
1440
    if (min_count is not None and max_count is not None and
1441
        min_count == max_count and len(args) != min_count):
1442
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1443
      return False
1444

    
1445
    # Command with limited number of arguments
1446
    if max_count is not None and len(args) > max_count:
1447
      ToStderr("Error: Command %s expects only %d argument(s)",
1448
               cmd, max_count)
1449
      return False
1450

    
1451
  # Command with some required arguments
1452
  if min_count is not None and len(args) < min_count:
1453
    ToStderr("Error: Command %s expects at least %d argument(s)",
1454
             cmd, min_count)
1455
    return False
1456

    
1457
  return True
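# Illustrative behaviour (added sketch, not part of the original module): the
# entries of args_def only need "min" and "max" attributes, so with a
# hypothetical definition object Arg(min=1, max=None) ("one or more values"),
# _CheckArguments("power", [Arg(min=1, max=None)], []) prints an error and
# returns False, while any non-empty argument list makes it return True.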


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
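# Illustrative behaviour (added sketch, not part of the original module):
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#     -> ("node1.example.com", None)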


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
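# Illustrative behaviour (added sketch, not part of the original module):
#   CalculateOSNames("debootstrap", ["default", "minimal"])
#     -> ["debootstrap+default", "debootstrap+minimal"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]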


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
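# Illustrative behaviour (added sketch, not part of the original module):
#   ParseFields(None, ["name", "os"])          -> ["name", "os"]
#   ParseFields("+disk_usage", ["name", "os"]) -> ["name", "os", "disk_usage"]
#   ParseFields("name,pnode", ["name", "os"])  -> ["name", "pnode"]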


UsesRPC = rpc.RunWithRPC
1506

    
1507

    
1508
def AskUser(text, choices=None):
1509
  """Ask the user a question.
1510

1511
  @param text: the question to ask
1512

1513
  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list
1521

1522
  """
1523
  if choices is None:
1524
    choices = [("y", True, "Perform the operation"),
1525
               ("n", False, "Do not perform the operation")]
1526
  if not choices or not isinstance(choices, list):
1527
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1528
  for entry in choices:
1529
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1530
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1531

    
1532
  answer = choices[-1][1]
1533
  new_text = []
1534
  for line in text.splitlines():
1535
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1536
  text = "\n".join(new_text)
1537
  try:
1538
    f = file("/dev/tty", "a+")
1539
  except IOError:
1540
    return answer
1541
  try:
1542
    chars = [entry[0] for entry in choices]
1543
    chars[-1] = "[%s]" % chars[-1]
1544
    chars.append("?")
1545
    maps = dict([(entry[0], entry[1]) for entry in choices])
1546
    while True:
1547
      f.write(text)
1548
      f.write("\n")
1549
      f.write("/".join(chars))
1550
      f.write(": ")
1551
      line = f.readline(2).strip().lower()
1552
      if line in maps:
1553
        answer = maps[line]
1554
        break
1555
      elif line == "?":
1556
        for entry in choices:
1557
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1558
        f.write("\n")
1559
        continue
1560
  finally:
1561
    f.close()
1562
  return answer
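# Illustrative usage (added sketch, not part of the original module):
#   AskUser("Destroy the cluster and all its instances?",
#           choices=[("y", True, "Destroy it"),
#                    ("n", False, "Keep it (default)")])
# returns the value associated with the character typed on /dev/tty; when no
# tty is available, the last entry's value (False here) is returned.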
1563

    
1564

    
1565
class JobSubmittedException(Exception):
1566
  """Job was submitted, client should exit.
1567

1568
  This exception has one argument, the ID of the job that was
1569
  submitted. The handler should print this ID.
1570

1571
  This is not an error, just a structured way to exit from clients.
1572

1573
  """
1574

    
1575

    
1576
def SendJob(ops, cl=None):
1577
  """Function to submit an opcode without waiting for the results.
1578

1579
  @type ops: list
1580
  @param ops: list of opcodes
1581
  @type cl: luxi.Client
1582
  @param cl: the luxi client to use for communicating with the master;
1583
             if None, a new client will be created
1584

1585
  """
1586
  if cl is None:
1587
    cl = GetClient()
1588

    
1589
  job_id = cl.SubmitJob(ops)
1590

    
1591
  return job_id
1592

    
1593

    
1594
def GenericPollJob(job_id, cbs, report_cbs):
1595
  """Generic job-polling function.
1596

1597
  @type job_id: number
1598
  @param job_id: Job ID
1599
  @type cbs: Instance of L{JobPollCbBase}
1600
  @param cbs: Data callbacks
1601
  @type report_cbs: Instance of L{JobPollReportCbBase}
1602
  @param report_cbs: Reporting callbacks
1603

1604
  """
1605
  prev_job_info = None
1606
  prev_logmsg_serial = None
1607

    
1608
  status = None
1609

    
1610
  while True:
1611
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1612
                                      prev_logmsg_serial)
1613
    if not result:
1614
      # job not found, go away!
1615
      raise errors.JobLost("Job with id %s lost" % job_id)
1616

    
1617
    if result == constants.JOB_NOTCHANGED:
1618
      report_cbs.ReportNotChanged(job_id, status)
1619

    
1620
      # Wait again
1621
      continue
1622

    
1623
    # Split result, a tuple of (field values, log entries)
1624
    (job_info, log_entries) = result
1625
    (status, ) = job_info
1626

    
1627
    if log_entries:
1628
      for log_entry in log_entries:
1629
        (serial, timestamp, log_type, message) = log_entry
1630
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1631
                                    log_type, message)
1632
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1633

    
1634
    # TODO: Handle canceled and archived jobs
1635
    elif status in (constants.JOB_STATUS_SUCCESS,
1636
                    constants.JOB_STATUS_ERROR,
1637
                    constants.JOB_STATUS_CANCELING,
1638
                    constants.JOB_STATUS_CANCELED):
1639
      break
1640

    
1641
    prev_job_info = job_info
1642

    
1643
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1644
  if not jobs:
1645
    raise errors.JobLost("Job with id %s lost" % job_id)
1646

    
1647
  status, opstatus, result = jobs[0]
1648

    
1649
  if status == constants.JOB_STATUS_SUCCESS:
1650
    return result
1651

    
1652
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1653
    raise errors.OpExecError("Job was canceled")
1654

    
1655
  has_ok = False
1656
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1657
    if status == constants.OP_STATUS_SUCCESS:
1658
      has_ok = True
1659
    elif status == constants.OP_STATUS_ERROR:
1660
      errors.MaybeRaise(msg)
1661

    
1662
      if has_ok:
1663
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1664
                                 (idx, msg))
1665

    
1666
      raise errors.OpExecError(str(msg))
1667

    
1668
  # default failure mode
1669
  raise errors.OpExecError(result)
1670

    
1671

    
1672
class JobPollCbBase:
1673
  """Base class for L{GenericPollJob} callbacks.
1674

1675
  """
1676
  def __init__(self):
1677
    """Initializes this class.
1678

1679
    """
1680

    
1681
  def WaitForJobChangeOnce(self, job_id, fields,
1682
                           prev_job_info, prev_log_serial):
1683
    """Waits for changes on a job.
1684

1685
    """
1686
    raise NotImplementedError()
1687

    
1688
  def QueryJobs(self, job_ids, fields):
1689
    """Returns the selected fields for the selected job IDs.
1690

1691
    @type job_ids: list of numbers
1692
    @param job_ids: Job IDs
1693
    @type fields: list of strings
1694
    @param fields: Fields
1695

1696
    """
1697
    raise NotImplementedError()
1698

    
1699

    
1700
class JobPollReportCbBase:
1701
  """Base class for L{GenericPollJob} reporting callbacks.
1702

1703
  """
1704
  def __init__(self):
1705
    """Initializes this class.
1706

1707
    """
1708

    
1709
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1710
    """Handles a log message.
1711

1712
    """
1713
    raise NotImplementedError()
1714

    
1715
  def ReportNotChanged(self, job_id, status):
1716
    """Called for if a job hasn't changed in a while.
1717

1718
    @type job_id: number
1719
    @param job_id: Job ID
1720
    @type status: string or None
1721
    @param status: Job status if available
1722

1723
    """
1724
    raise NotImplementedError()
1725

    
1726

    
1727
class _LuxiJobPollCb(JobPollCbBase):
1728
  def __init__(self, cl):
1729
    """Initializes this class.
1730

1731
    """
1732
    JobPollCbBase.__init__(self)
1733
    self.cl = cl
1734

    
1735
  def WaitForJobChangeOnce(self, job_id, fields,
1736
                           prev_job_info, prev_log_serial):
1737
    """Waits for changes on a job.
1738

1739
    """
1740
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1741
                                        prev_job_info, prev_log_serial)
1742

    
1743
  def QueryJobs(self, job_ids, fields):
1744
    """Returns the selected fields for the selected job IDs.
1745

1746
    """
1747
    return self.cl.QueryJobs(job_ids, fields)
1748

    
1749

    
1750
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1751
  def __init__(self, feedback_fn):
1752
    """Initializes this class.
1753

1754
    """
1755
    JobPollReportCbBase.__init__(self)
1756

    
1757
    self.feedback_fn = feedback_fn
1758

    
1759
    assert callable(feedback_fn)
1760

    
1761
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1762
    """Handles a log message.
1763

1764
    """
1765
    self.feedback_fn((timestamp, log_type, log_msg))
1766

    
1767
  def ReportNotChanged(self, job_id, status):
1768
    """Called if a job hasn't changed in a while.
1769

1770
    """
1771
    # Ignore
1772

    
1773

    
1774
class StdioJobPollReportCb(JobPollReportCbBase):
1775
  def __init__(self):
1776
    """Initializes this class.
1777

1778
    """
1779
    JobPollReportCbBase.__init__(self)
1780

    
1781
    self.notified_queued = False
1782
    self.notified_waitlock = False
1783

    
1784
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1785
    """Handles a log message.
1786

1787
    """
1788
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1789
             FormatLogMessage(log_type, log_msg))
1790

    
1791
  def ReportNotChanged(self, job_id, status):
1792
    """Called if a job hasn't changed in a while.
1793

1794
    """
1795
    if status is None:
1796
      return
1797

    
1798
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1799
      ToStderr("Job %s is waiting in queue", job_id)
1800
      self.notified_queued = True
1801

    
1802
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1803
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1804
      self.notified_waitlock = True
1805

    
1806

    
1807
def FormatLogMessage(log_type, log_msg):
1808
  """Formats a job message according to its type.
1809

1810
  """
1811
  if log_type != constants.ELOG_MESSAGE:
1812
    log_msg = str(log_msg)
1813

    
1814
  return utils.SafeEncode(log_msg)
1815

    
1816

    
1817
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1818
  """Function to poll for the result of a job.
1819

1820
  @type job_id: job identifier
1821
  @param job_id: the job to poll for results
1822
  @type cl: luxi.Client
1823
  @param cl: the luxi client to use for communicating with the master;
1824
             if None, a new client will be created
1825

1826
  """
1827
  if cl is None:
1828
    cl = GetClient()
1829

    
1830
  if reporter is None:
1831
    if feedback_fn:
1832
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1833
    else:
1834
      reporter = StdioJobPollReportCb()
1835
  elif feedback_fn:
1836
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1837

    
1838
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1839

    
1840

    
1841
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1842
  """Legacy function to submit an opcode.
1843

1844
  This is just a simple wrapper over the construction of the processor
1845
  instance. It should be extended to better handle feedback and
1846
  interaction functions.
1847

1848
  """
1849
  if cl is None:
1850
    cl = GetClient()
1851

    
1852
  SetGenericOpcodeOpts([op], opts)
1853

    
1854
  job_id = SendJob([op], cl=cl)
1855

    
1856
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1857
                       reporter=reporter)
1858

    
1859
  return op_results[0]
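# Illustrative flow (added sketch, not part of the original module; it needs a
# running master daemon and any opcode instance "op", e.g. one built the same
# way GenericInstanceCreate below builds opcodes.OpInstanceCreate):
#   cl = GetClient()
#   result = SubmitOpCode(op, cl=cl)
# submits "op" as a single-opcode job, polls it until completion (printing the
# job's log messages on stdout) and returns the result of the only opcode.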
1860

    
1861

    
1862
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1863
  """Wrapper around SubmitOpCode or SendJob.
1864

1865
  This function will decide, based on the 'opts' parameter, whether to
1866
  submit and wait for the result of the opcode (and return it), or
1867
  whether to just send the job and print its identifier. It is used in
1868
  order to simplify the implementation of the '--submit' option.
1869

1870
  It will also process the opcodes if we're sending them via SendJob
1871
  (otherwise SubmitOpCode does it).
1872

1873
  """
1874
  if opts and opts.submit_only:
1875
    job = [op]
1876
    SetGenericOpcodeOpts(job, opts)
1877
    job_id = SendJob(job, cl=cl)
1878
    raise JobSubmittedException(job_id)
1879
  else:
1880
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1881

    
1882

    
1883
def SetGenericOpcodeOpts(opcode_list, options):
1884
  """Processor for generic options.
1885

1886
  This function updates the given opcodes based on generic command
1887
  line options (like debug, dry-run, etc.).
1888

1889
  @param opcode_list: list of opcodes
1890
  @param options: command line options or None
1891
  @return: None (in-place modification)
1892

1893
  """
1894
  if not options:
1895
    return
1896
  for op in opcode_list:
1897
    op.debug_level = options.debug
1898
    if hasattr(options, "dry_run"):
1899
      op.dry_run = options.dry_run
1900
    if getattr(options, "priority", None) is not None:
1901
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1902

    
1903

    
1904
def GetClient():
1905
  # TODO: Cache object?
1906
  try:
1907
    client = luxi.Client()
1908
  except luxi.NoMasterError:
1909
    ss = ssconf.SimpleStore()
1910

    
1911
    # Try to read ssconf file
1912
    try:
1913
      ss.GetMasterNode()
1914
    except errors.ConfigurationError:
1915
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1916
                                 " not part of a cluster")
1917

    
1918
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1919
    if master != myself:
1920
      raise errors.OpPrereqError("This is not the master node, please connect"
1921
                                 " to node '%s' and rerun the command" %
1922
                                 master)
1923
    raise
1924
  return client
1925

    
1926

    
1927
def FormatError(err):
1928
  """Return a formatted error message for a given error.
1929

1930
  This function takes an exception instance and returns a tuple
1931
  consisting of two values: first, the recommended exit code, and
1932
  second, a string describing the error message (not
1933
  newline-terminated).
1934

1935
  """
1936
  retcode = 1
1937
  obuf = StringIO()
1938
  msg = str(err)
1939
  if isinstance(err, errors.ConfigurationError):
1940
    txt = "Corrupt configuration file: %s" % msg
1941
    logging.error(txt)
1942
    obuf.write(txt + "\n")
1943
    obuf.write("Aborting.")
1944
    retcode = 2
1945
  elif isinstance(err, errors.HooksAbort):
1946
    obuf.write("Failure: hooks execution failed:\n")
1947
    for node, script, out in err.args[0]:
1948
      if out:
1949
        obuf.write("  node: %s, script: %s, output: %s\n" %
1950
                   (node, script, out))
1951
      else:
1952
        obuf.write("  node: %s, script: %s (no output)\n" %
1953
                   (node, script))
1954
  elif isinstance(err, errors.HooksFailure):
1955
    obuf.write("Failure: hooks general failure: %s" % msg)
1956
  elif isinstance(err, errors.ResolverError):
1957
    this_host = netutils.Hostname.GetSysName()
1958
    if err.args[0] == this_host:
1959
      msg = "Failure: can't resolve my own hostname ('%s')"
1960
    else:
1961
      msg = "Failure: can't resolve hostname '%s'"
1962
    obuf.write(msg % err.args[0])
1963
  elif isinstance(err, errors.OpPrereqError):
1964
    if len(err.args) == 2:
1965
      obuf.write("Failure: prerequisites not met for this"
1966
               " operation:\nerror type: %s, error details:\n%s" %
1967
                 (err.args[1], err.args[0]))
1968
    else:
1969
      obuf.write("Failure: prerequisites not met for this"
1970
                 " operation:\n%s" % msg)
1971
  elif isinstance(err, errors.OpExecError):
1972
    obuf.write("Failure: command execution error:\n%s" % msg)
1973
  elif isinstance(err, errors.TagError):
1974
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1975
  elif isinstance(err, errors.JobQueueDrainError):
1976
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1977
               " accept new requests\n")
1978
  elif isinstance(err, errors.JobQueueFull):
1979
    obuf.write("Failure: the job queue is full and doesn't accept new"
1980
               " job submissions until old jobs are archived\n")
1981
  elif isinstance(err, errors.TypeEnforcementError):
1982
    obuf.write("Parameter Error: %s" % msg)
1983
  elif isinstance(err, errors.ParameterError):
1984
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1985
  elif isinstance(err, luxi.NoMasterError):
1986
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1987
               " and listening for connections?")
1988
  elif isinstance(err, luxi.TimeoutError):
1989
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1990
               " been submitted and will continue to run even if the call"
1991
               " timed out. Useful commands in this situation are \"gnt-job"
1992
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1993
    obuf.write(msg)
1994
  elif isinstance(err, luxi.PermissionError):
1995
    obuf.write("It seems you don't have permissions to connect to the"
1996
               " master daemon.\nPlease retry as a different user.")
1997
  elif isinstance(err, luxi.ProtocolError):
1998
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1999
               "%s" % msg)
2000
  elif isinstance(err, errors.JobLost):
2001
    obuf.write("Error checking job status: %s" % msg)
2002
  elif isinstance(err, errors.QueryFilterParseError):
2003
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2004
    obuf.write("\n".join(err.GetDetails()))
2005
  elif isinstance(err, errors.GenericError):
2006
    obuf.write("Unhandled Ganeti error: %s" % msg)
2007
  elif isinstance(err, JobSubmittedException):
2008
    obuf.write("JobID: %s\n" % err.args[0])
2009
    retcode = 0
2010
  else:
2011
    obuf.write("Unhandled exception: %s" % msg)
2012
  return retcode, obuf.getvalue().rstrip("\n")
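# Illustrative behaviour (added sketch, not part of the original module):
#   FormatError(errors.OpExecError("disk sync failed"))
#     -> (1, "Failure: command execution error:\ndisk sync failed")
#   FormatError(JobSubmittedException("1234"))
#     -> (0, "JobID: 1234")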
2013

    
2014

    
2015
def GenericMain(commands, override=None, aliases=None,
2016
                env_override=frozenset()):
2017
  """Generic main function for all the gnt-* commands.
2018

2019
  @param commands: a dictionary with a special structure, see the design doc
2020
                   for command line handling.
2021
  @param override: if not None, we expect a dictionary with keys that will
2022
                   override command line options; this can be used to pass
2023
                   options from the scripts to generic functions
2024
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2025
  @param env_override: list of environment names which are allowed to submit
2026
                       default args for commands
2027

2028
  """
2029
  # save the program name and the entire command line for later logging
2030
  if sys.argv:
2031
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
2032
    if len(sys.argv) >= 2:
2033
      binary += " " + sys.argv[1]
2034
      old_cmdline = " ".join(sys.argv[2:])
2035
    else:
2036
      old_cmdline = ""
2037
  else:
2038
    binary = "<unknown program>"
2039
    old_cmdline = ""
2040

    
2041
  if aliases is None:
2042
    aliases = {}
2043

    
2044
  try:
2045
    func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
2046
  except errors.ParameterError, err:
2047
    result, err_msg = FormatError(err)
2048
    ToStderr(err_msg)
2049
    return 1
2050

    
2051
  if func is None: # parse error
2052
    return 1
2053

    
2054
  if override is not None:
2055
    for key, val in override.iteritems():
2056
      setattr(options, key, val)
2057

    
2058
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
2059
                     stderr_logging=True)
2060

    
2061
  if old_cmdline:
2062
    logging.info("run with arguments '%s'", old_cmdline)
2063
  else:
2064
    logging.info("run with no arguments")
2065

    
2066
  try:
2067
    result = func(options, args)
2068
  except (errors.GenericError, luxi.ProtocolError,
2069
          JobSubmittedException), err:
2070
    result, err_msg = FormatError(err)
2071
    logging.exception("Error during command processing")
2072
    ToStderr(err_msg)
2073
  except KeyboardInterrupt:
2074
    result = constants.EXIT_FAILURE
2075
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2076
             " might have been submitted and"
2077
             " will continue to run in the background.")
2078
  except IOError, err:
2079
    if err.errno == errno.EPIPE:
2080
      # our terminal went away, we'll exit
2081
      sys.exit(constants.EXIT_FAILURE)
2082
    else:
2083
      raise
2084

    
2085
  return result
2086

    
2087

    
2088
def ParseNicOption(optvalue):
2089
  """Parses the value of the --net option(s).
2090

2091
  """
2092
  try:
2093
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2094
  except (TypeError, ValueError), err:
2095
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2096

    
2097
  nics = [{}] * nic_max
2098
  for nidx, ndict in optvalue:
2099
    nidx = int(nidx)
2100

    
2101
    if not isinstance(ndict, dict):
2102
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2103
                                 " got %s" % (nidx, ndict))
2104

    
2105
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2106

    
2107
    nics[nidx] = ndict
2108

    
2109
  return nics
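# Illustrative behaviour (added sketch, not part of the original module;
# "link" and "mode" are assumed to be valid keys in
# constants.INIC_PARAMS_TYPES):
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#     -> [{"link": "br0"}, {}, {"mode": "routed"}]
# i.e. the highest NIC index determines the list length and indices that were
# not specified stay empty.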
2110

    
2111

    
2112
def GenericInstanceCreate(mode, opts, args):
2113
  """Add an instance to the cluster via either creation or import.
2114

2115
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2116
  @param opts: the command line options selected by the user
2117
  @type args: list
2118
  @param args: should contain only one element, the new instance name
2119
  @rtype: int
2120
  @return: the desired exit code
2121

2122
  """
2123
  instance = args[0]
2124

    
2125
  (pnode, snode) = SplitNodeOption(opts.node)
2126

    
2127
  hypervisor = None
2128
  hvparams = {}
2129
  if opts.hypervisor:
2130
    hypervisor, hvparams = opts.hypervisor
2131

    
2132
  if opts.nics:
2133
    nics = ParseNicOption(opts.nics)
2134
  elif opts.no_nics:
2135
    # no nics
2136
    nics = []
2137
  elif mode == constants.INSTANCE_CREATE:
2138
    # default of one nic, all auto
2139
    nics = [{}]
2140
  else:
2141
    # mode == import
2142
    nics = []
2143

    
2144
  if opts.disk_template == constants.DT_DISKLESS:
2145
    if opts.disks or opts.sd_size is not None:
2146
      raise errors.OpPrereqError("Diskless instance but disk"
2147
                                 " information passed")
2148
    disks = []
2149
  else:
2150
    if (not opts.disks and not opts.sd_size
2151
        and mode == constants.INSTANCE_CREATE):
2152
      raise errors.OpPrereqError("No disk information specified")
2153
    if opts.disks and opts.sd_size is not None:
2154
      raise errors.OpPrereqError("Please use either the '--disk' or"
2155
                                 " '-s' option")
2156
    if opts.sd_size is not None:
2157
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2158

    
2159
    if opts.disks:
2160
      try:
2161
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2162
      except ValueError, err:
2163
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2164
      disks = [{}] * disk_max
2165
    else:
2166
      disks = []
2167
    for didx, ddict in opts.disks:
2168
      didx = int(didx)
2169
      if not isinstance(ddict, dict):
2170
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2171
        raise errors.OpPrereqError(msg)
2172
      elif constants.IDISK_SIZE in ddict:
2173
        if constants.IDISK_ADOPT in ddict:
2174
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2175
                                     " (disk %d)" % didx)
2176
        try:
2177
          ddict[constants.IDISK_SIZE] = \
2178
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2179
        except ValueError, err:
2180
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2181
                                     (didx, err))
2182
      elif constants.IDISK_ADOPT in ddict:
2183
        if mode == constants.INSTANCE_IMPORT:
2184
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2185
                                     " import")
2186
        ddict[constants.IDISK_SIZE] = 0
2187
      else:
2188
        raise errors.OpPrereqError("Missing size or adoption source for"
2189
                                   " disk %d" % didx)
2190
      disks[didx] = ddict
2191

    
2192
  if opts.tags is not None:
2193
    tags = opts.tags.split(",")
2194
  else:
2195
    tags = []
2196

    
2197
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2198
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2199

    
2200
  if mode == constants.INSTANCE_CREATE:
2201
    start = opts.start
2202
    os_type = opts.os
2203
    force_variant = opts.force_variant
2204
    src_node = None
2205
    src_path = None
2206
    no_install = opts.no_install
2207
    identify_defaults = False
2208
  elif mode == constants.INSTANCE_IMPORT:
2209
    start = False
2210
    os_type = None
2211
    force_variant = False
2212
    src_node = opts.src_node
2213
    src_path = opts.src_dir
2214
    no_install = None
2215
    identify_defaults = opts.identify_defaults
2216
  else:
2217
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2218

    
2219
  op = opcodes.OpInstanceCreate(instance_name=instance,
2220
                                disks=disks,
2221
                                disk_template=opts.disk_template,
2222
                                nics=nics,
2223
                                pnode=pnode, snode=snode,
2224
                                ip_check=opts.ip_check,
2225
                                name_check=opts.name_check,
2226
                                wait_for_sync=opts.wait_for_sync,
2227
                                file_storage_dir=opts.file_storage_dir,
2228
                                file_driver=opts.file_driver,
2229
                                iallocator=opts.iallocator,
2230
                                hypervisor=hypervisor,
2231
                                hvparams=hvparams,
2232
                                beparams=opts.beparams,
2233
                                osparams=opts.osparams,
2234
                                mode=mode,
2235
                                start=start,
2236
                                os_type=os_type,
2237
                                force_variant=force_variant,
2238
                                src_node=src_node,
2239
                                src_path=src_path,
2240
                                tags=tags,
2241
                                no_install=no_install,
2242
                                identify_defaults=identify_defaults)
2243

    
2244
  SubmitOrSend(op, opts)
2245
  return 0
2246

    
2247

    
2248
class _RunWhileClusterStoppedHelper:
2249
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2250

2251
  """
2252
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2253
    """Initializes this class.
2254

2255
    @type feedback_fn: callable
2256
    @param feedback_fn: Feedback function
2257
    @type cluster_name: string
2258
    @param cluster_name: Cluster name
2259
    @type master_node: string
2260
    @param master_node: Master node name
2261
    @type online_nodes: list
2262
    @param online_nodes: List of names of online nodes
2263

2264
    """
2265
    self.feedback_fn = feedback_fn
2266
    self.cluster_name = cluster_name
2267
    self.master_node = master_node
2268
    self.online_nodes = online_nodes
2269

    
2270
    self.ssh = ssh.SshRunner(self.cluster_name)
2271

    
2272
    self.nonmaster_nodes = [name for name in online_nodes
2273
                            if name != master_node]
2274

    
2275
    assert self.master_node not in self.nonmaster_nodes
2276

    
2277
  def _RunCmd(self, node_name, cmd):
2278
    """Runs a command on the local or a remote machine.
2279

2280
    @type node_name: string
2281
    @param node_name: Machine name
2282
    @type cmd: list
2283
    @param cmd: Command
2284

2285
    """
2286
    if node_name is None or node_name == self.master_node:
2287
      # No need to use SSH
2288
      result = utils.RunCmd(cmd)
2289
    else:
2290
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2291

    
2292
    if result.failed:
2293
      errmsg = ["Failed to run command %s" % result.cmd]
2294
      if node_name:
2295
        errmsg.append("on node %s" % node_name)
2296
      errmsg.append(": exitcode %s and error %s" %
2297
                    (result.exit_code, result.output))
2298
      raise errors.OpExecError(" ".join(errmsg))
2299

    
2300
  def Call(self, fn, *args):
2301
    """Call function while all daemons are stopped.
2302

2303
    @type fn: callable
2304
    @param fn: Function to be called
2305

2306
    """
2307
    # Pause watcher by acquiring an exclusive lock on watcher state file
2308
    self.feedback_fn("Blocking watcher")
2309
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2310
    try:
2311
      # TODO: Currently, this just blocks. There's no timeout.
2312
      # TODO: Should it be a shared lock?
2313
      watcher_block.Exclusive(blocking=True)
2314

    
2315
      # Stop master daemons, so that no new jobs can come in and all running
2316
      # ones are finished
2317
      self.feedback_fn("Stopping master daemons")
2318
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2319
      try:
2320
        # Stop daemons on all nodes
2321
        for node_name in self.online_nodes:
2322
          self.feedback_fn("Stopping daemons on %s" % node_name)
2323
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2324

    
2325
        # All daemons are shut down now
2326
        try:
2327
          return fn(self, *args)
2328
        except Exception, err:
2329
          _, errmsg = FormatError(err)
2330
          logging.exception("Caught exception")
2331
          self.feedback_fn(errmsg)
2332
          raise
2333
      finally:
2334
        # Start cluster again, master node last
2335
        for node_name in self.nonmaster_nodes + [self.master_node]:
2336
          self.feedback_fn("Starting daemons on %s" % node_name)
2337
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2338
    finally:
2339
      # Resume watcher
2340
      watcher_block.Close()
2341

    
2342

    
2343
def RunWhileClusterStopped(feedback_fn, fn, *args):
2344
  """Calls a function while all cluster daemons are stopped.
2345

2346
  @type feedback_fn: callable
2347
  @param feedback_fn: Feedback function
2348
  @type fn: callable
2349
  @param fn: Function to be called when daemons are stopped
2350

2351
  """
2352
  feedback_fn("Gathering cluster information")
2353

    
2354
  # This ensures we're running on the master daemon
2355
  cl = GetClient()
2356

    
2357
  (cluster_name, master_node) = \
2358
    cl.QueryConfigValues(["cluster_name", "master_node"])
2359

    
2360
  online_nodes = GetOnlineNodes([], cl=cl)
2361

    
2362
  # Don't keep a reference to the client. The master daemon will go away.
2363
  del cl
2364

    
2365
  assert master_node in online_nodes
2366

    
2367
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2368
                                       online_nodes).Call(fn, *args)
2369

    
2370

    
2371
def GenerateTable(headers, fields, separator, data,
2372
                  numfields=None, unitfields=None,
2373
                  units=None):
2374
  """Prints a table with headers and different fields.
2375

2376
  @type headers: dict
2377
  @param headers: dictionary mapping field names to headers for
2378
      the table
2379
  @type fields: list
2380
  @param fields: the field names corresponding to each row in
2381
      the data field
2382
  @param separator: the separator to be used; if this is None,
2383
      the default 'smart' algorithm is used which computes optimal
2384
      field width, otherwise just the separator is used between
2385
      each field
2386
  @type data: list
2387
  @param data: a list of lists, each sublist being one row to be output
2388
  @type numfields: list
2389
  @param numfields: a list with the fields that hold numeric
2390
      values and thus should be right-aligned
2391
  @type unitfields: list
2392
  @param unitfields: a list with the fields that hold numeric
2393
      values that should be formatted with the units field
2394
  @type units: string or None
2395
  @param units: the units we should use for formatting, or None for
2396
      automatic choice (human-readable for non-separator usage, otherwise
2397
      megabytes); this is a one-letter string
2398

2399
  """
2400
  if units is None:
2401
    if separator:
2402
      units = "m"
2403
    else:
2404
      units = "h"
2405

    
2406
  if numfields is None:
2407
    numfields = []
2408
  if unitfields is None:
2409
    unitfields = []
2410

    
2411
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2412
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2413

    
2414
  format_fields = []
2415
  for field in fields:
2416
    if headers and field not in headers:
2417
      # TODO: handle better unknown fields (either revert to old
2418
      # style of raising exception, or deal more intelligently with
2419
      # variable fields)
2420
      headers[field] = field
2421
    if separator is not None:
2422
      format_fields.append("%s")
2423
    elif numfields.Matches(field):
2424
      format_fields.append("%*s")
2425
    else:
2426
      format_fields.append("%-*s")
2427

    
2428
  if separator is None:
2429
    mlens = [0 for name in fields]
2430
    format_str = " ".join(format_fields)
2431
  else:
2432
    format_str = separator.replace("%", "%%").join(format_fields)
2433

    
2434
  for row in data:
2435
    if row is None:
2436
      continue
2437
    for idx, val in enumerate(row):
2438
      if unitfields.Matches(fields[idx]):
2439
        try:
2440
          val = int(val)
2441
        except (TypeError, ValueError):
2442
          pass
2443
        else:
2444
          val = row[idx] = utils.FormatUnit(val, units)
2445
      val = row[idx] = str(val)
2446
      if separator is None:
2447
        mlens[idx] = max(mlens[idx], len(val))
2448

    
2449
  result = []
2450
  if headers:
2451
    args = []
2452
    for idx, name in enumerate(fields):
2453
      hdr = headers[name]
2454
      if separator is None:
2455
        mlens[idx] = max(mlens[idx], len(hdr))
2456
        args.append(mlens[idx])
2457
      args.append(hdr)
2458
    result.append(format_str % tuple(args))
2459

    
2460
  if separator is None:
2461
    assert len(mlens) == len(fields)
2462

    
2463
    if fields and not numfields.Matches(fields[-1]):
2464
      mlens[-1] = 0
2465

    
2466
  for line in data:
2467
    args = []
2468
    if line is None:
2469
      line = ["-" for _ in fields]
2470
    for idx in range(len(fields)):
2471
      if separator is None:
2472
        args.append(mlens[idx])
2473
      args.append(line[idx])
2474
    result.append(format_str % tuple(args))
2475

    
2476
  return result
2477

    
2478

    
2479
def _FormatBool(value):
2480
  """Formats a boolean value as a string.
2481

2482
  """
2483
  if value:
2484
    return "Y"
2485
  return "N"
2486

    
2487

    
2488
#: Default formatting for query results; (callback, align right)
2489
_DEFAULT_FORMAT_QUERY = {
2490
  constants.QFT_TEXT: (str, False),
2491
  constants.QFT_BOOL: (_FormatBool, False),
2492
  constants.QFT_NUMBER: (str, True),
2493
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2494
  constants.QFT_OTHER: (str, False),
2495
  constants.QFT_UNKNOWN: (str, False),
2496
  }
2497

    
2498

    
2499
def _GetColumnFormatter(fdef, override, unit):
2500
  """Returns formatting function for a field.
2501

2502
  @type fdef: L{objects.QueryFieldDefinition}
2503
  @type override: dict
2504
  @param override: Dictionary for overriding field formatting functions,
2505
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2506
  @type unit: string
2507
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2508
  @rtype: tuple; (callable, bool)
2509
  @return: Returns the function to format a value (takes one parameter) and a
2510
    boolean for aligning the value on the right-hand side
2511

2512
  """
2513
  fmt = override.get(fdef.name, None)
2514
  if fmt is not None:
2515
    return fmt
2516

    
2517
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2518

    
2519
  if fdef.kind == constants.QFT_UNIT:
2520
    # Can't keep this information in the static dictionary
2521
    return (lambda value: utils.FormatUnit(value, unit), True)
2522

    
2523
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2524
  if fmt is not None:
2525
    return fmt
2526

    
2527
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2528

    
2529

    
2530
class _QueryColumnFormatter:
2531
  """Callable class for formatting fields of a query.
2532

2533
  """
2534
  def __init__(self, fn, status_fn, verbose):
2535
    """Initializes this class.
2536

2537
    @type fn: callable
2538
    @param fn: Formatting function
2539
    @type status_fn: callable
2540
    @param status_fn: Function to report fields' status
2541
    @type verbose: boolean
2542
    @param verbose: whether to use verbose field descriptions or not
2543

2544
    """
2545
    self._fn = fn
2546
    self._status_fn = status_fn
2547
    self._verbose = verbose
2548

    
2549
  def __call__(self, data):
2550
    """Returns a field's string representation.
2551

2552
    """
2553
    (status, value) = data
2554

    
2555
    # Report status
2556
    self._status_fn(status)
2557

    
2558
    if status == constants.RS_NORMAL:
2559
      return self._fn(value)
2560

    
2561
    assert value is None, \
2562
           "Found value %r for abnormal status %s" % (value, status)
2563

    
2564
    return FormatResultError(status, self._verbose)
2565

    
2566

    
2567
def FormatResultError(status, verbose):
2568
  """Formats result status other than L{constants.RS_NORMAL}.
2569

2570
  @param status: The result status
2571
  @type verbose: boolean
2572
  @param verbose: Whether to return the verbose text
2573
  @return: Text of result status
2574

2575
  """
2576
  assert status != constants.RS_NORMAL, \
2577
         "FormatResultError called with status equal to constants.RS_NORMAL"
2578
  try:
2579
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2580
  except KeyError:
2581
    raise NotImplementedError("Unknown status %s" % status)
2582
  else:
2583
    if verbose:
2584
      return verbose_text
2585
    return normal_text
2586

    
2587

    
2588
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2589
                      header=False, verbose=False):
2590
  """Formats data in L{objects.QueryResponse}.
2591

2592
  @type result: L{objects.QueryResponse}
2593
  @param result: result of query operation
2594
  @type unit: string
2595
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2596
    see L{utils.text.FormatUnit}
2597
  @type format_override: dict
2598
  @param format_override: Dictionary for overriding field formatting functions,
2599
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2600
  @type separator: string or None
2601
  @param separator: String used to separate fields
2602
  @type header: bool
2603
  @param header: Whether to output header row
2604
  @type verbose: boolean
2605
  @param verbose: whether to use verbose field descriptions or not
2606

2607
  """
2608
  if unit is None:
2609
    if separator:
2610
      unit = "m"
2611
    else:
2612
      unit = "h"
2613

    
2614
  if format_override is None:
2615
    format_override = {}
2616

    
2617
  stats = dict.fromkeys(constants.RS_ALL, 0)
2618

    
2619
  def _RecordStatus(status):
2620
    if status in stats:
2621
      stats[status] += 1
2622

    
2623
  columns = []
2624
  for fdef in result.fields:
2625
    assert fdef.title and fdef.name
2626
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2627
    columns.append(TableColumn(fdef.title,
2628
                               _QueryColumnFormatter(fn, _RecordStatus,
2629
                                                     verbose),
2630
                               align_right))
2631

    
2632
  table = FormatTable(result.data, columns, header, separator)
2633

    
2634
  # Collect statistics
2635
  assert len(stats) == len(constants.RS_ALL)
2636
  assert compat.all(count >= 0 for count in stats.values())
2637

    
2638
  # Determine overall status. If there was no data, unknown fields must be
2639
  # detected via the field definitions.
2640
  if (stats[constants.RS_UNKNOWN] or
2641
      (not result.data and _GetUnknownFields(result.fields))):
2642
    status = QR_UNKNOWN
2643
  elif compat.any(count > 0 for key, count in stats.items()
2644
                  if key != constants.RS_NORMAL):
2645
    status = QR_INCOMPLETE
2646
  else:
2647
    status = QR_NORMAL
2648

    
2649
  return (status, table)
2650

    
2651

    
2652
def _GetUnknownFields(fdefs):
2653
  """Returns list of unknown fields included in C{fdefs}.
2654

2655
  @type fdefs: list of L{objects.QueryFieldDefinition}
2656

2657
  """
2658
  return [fdef for fdef in fdefs
2659
          if fdef.kind == constants.QFT_UNKNOWN]
2660

    
2661

    
2662
def _WarnUnknownFields(fdefs):
2663
  """Prints a warning to stderr if a query included unknown fields.
2664

2665
  @type fdefs: list of L{objects.QueryFieldDefinition}
2666

2667
  """
2668
  unknown = _GetUnknownFields(fdefs)
2669
  if unknown:
2670
    ToStderr("Warning: Queried for unknown fields %s",
2671
             utils.CommaJoin(fdef.name for fdef in unknown))
2672
    return True
2673

    
2674
  return False
2675

    
2676

    
2677
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2678
                format_override=None, verbose=False, force_filter=False):
2679
  """Generic implementation for listing all items of a resource.
2680

2681
  @param resource: One of L{constants.QR_VIA_LUXI}
2682
  @type fields: list of strings
2683
  @param fields: List of fields to query for
2684
  @type names: list of strings
2685
  @param names: Names of items to query for
2686
  @type unit: string or None
2687
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2688
    None for automatic choice (human-readable for non-separator usage,
2689
    otherwise megabytes); this is a one-letter string
2690
  @type separator: string or None
2691
  @param separator: String used to separate fields
2692
  @type header: bool
2693
  @param header: Whether to show header row
2694
  @type force_filter: bool
2695
  @param force_filter: Whether to always treat names as filter
2696
  @type format_override: dict
2697
  @param format_override: Dictionary for overriding field formatting functions,
2698
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2699
  @type verbose: boolean
2700
  @param verbose: whether to use verbose field descriptions or not
2701

2702
  """
2703
  if not names:
2704
    names = None
2705

    
2706
  qfilter = qlang.MakeFilter(names, force_filter)
2707

    
2708
  if cl is None:
2709
    cl = GetClient()
2710

    
2711
  response = cl.Query(resource, fields, qfilter)
2712

    
2713
  found_unknown = _WarnUnknownFields(response.fields)
2714

    
2715
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2716
                                     header=header,
2717
                                     format_override=format_override,
2718
                                     verbose=verbose)
2719

    
2720
  for line in data:
2721
    ToStdout(line)
2722

    
2723
  assert ((found_unknown and status == QR_UNKNOWN) or
2724
          (not found_unknown and status != QR_UNKNOWN))
2725

    
2726
  if status == QR_UNKNOWN:
2727
    return constants.EXIT_UNKNOWN_FIELD
2728

    
2729
  # TODO: Should the list command fail if not all data could be collected?
2730
  return constants.EXIT_SUCCESS
2731

    
2732

    
2733
def GenericListFields(resource, fields, separator, header, cl=None):
2734
  """Generic implementation for listing fields for a resource.
2735

2736
  @param resource: One of L{constants.QR_VIA_LUXI}
2737
  @type fields: list of strings
2738
  @param fields: List of fields to query for
2739
  @type separator: string or None
2740
  @param separator: String used to separate fields
2741
  @type header: bool
2742
  @param header: Whether to show header row
2743

2744
  """
2745
  if cl is None:
2746
    cl = GetClient()
2747

    
2748
  if not fields:
2749
    fields = None
2750

    
2751
  response = cl.QueryFields(resource, fields)
2752

    
2753
  found_unknown = _WarnUnknownFields(response.fields)
2754

    
2755
  columns = [
2756
    TableColumn("Name", str, False),
2757
    TableColumn("Title", str, False),
2758
    TableColumn("Description", str, False),
2759
    ]
2760

    
2761
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2762

    
2763
  for line in FormatTable(rows, columns, header, separator):
2764
    ToStdout(line)
2765

    
2766
  if found_unknown:
2767
    return constants.EXIT_UNKNOWN_FIELD
2768

    
2769
  return constants.EXIT_SUCCESS
2770

    
2771

    
2772
class TableColumn:
2773
  """Describes a column for L{FormatTable}.
2774

2775
  """
2776
  def __init__(self, title, fn, align_right):
2777
    """Initializes this class.
2778

2779
    @type title: string
2780
    @param title: Column title
2781
    @type fn: callable
2782
    @param fn: Formatting function
2783
    @type align_right: bool
2784
    @param align_right: Whether to align values on the right-hand side
2785

2786
    """
2787
    self.title = title
2788
    self.format = fn
2789
    self.align_right = align_right
2790

    
2791

    
2792
def _GetColFormatString(width, align_right):
2793
  """Returns the format string for a field.
2794

2795
  """
2796
  if align_right:
2797
    sign = ""
2798
  else:
2799
    sign = "-"
2800

    
2801
  return "%%%s%ss" % (sign, width)
2802

    
2803

    
2804
def FormatTable(rows, columns, header, separator):
2805
  """Formats data as a table.
2806

2807
  @type rows: list of lists
2808
  @param rows: Row data, one list per row
2809
  @type columns: list of L{TableColumn}
2810
  @param columns: Column descriptions
2811
  @type header: bool
2812
  @param header: Whether to show header row
2813
  @type separator: string or None
2814
  @param separator: String used to separate columns
2815

2816
  """
2817
  if header:
2818
    data = [[col.title for col in columns]]
2819
    colwidth = [len(col.title) for col in columns]
2820
  else:
2821
    data = []
2822
    colwidth = [0 for _ in columns]
2823

    
2824
  # Format row data
2825
  for row in rows:
2826
    assert len(row) == len(columns)
2827

    
2828
    formatted = [col.format(value) for value, col in zip(row, columns)]
2829

    
2830
    if separator is None:
2831
      # Update column widths
2832
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2833
        # Modifying a list's items while iterating is fine
2834
        colwidth[idx] = max(oldwidth, len(value))
2835

    
2836
    data.append(formatted)
2837

    
2838
  if separator is not None:
2839
    # Return early if a separator is used
2840
    return [separator.join(row) for row in data]
2841

    
2842
  if columns and not columns[-1].align_right:
2843
    # Avoid unnecessary spaces at end of line
2844
    colwidth[-1] = 0
2845

    
2846
  # Build format string
2847
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2848
                  for col, width in zip(columns, colwidth)])
2849

    
2850
  return [fmt % tuple(row) for row in data]
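# Illustrative usage (added sketch, not part of the original module):
#   cols = [TableColumn("Name", str, False), TableColumn("Size", str, True)]
#   FormatTable([["inst1", "512"], ["inst2", "10240"]], cols, True, None)
# returns three strings (header plus one per row) with the "Size" column
# right-aligned; passing a separator instead simply joins the formatted
# fields with it and skips the width computation.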
2851

    
2852

    
2853
def FormatTimestamp(ts):
2854
  """Formats a given timestamp.
2855

2856
  @type ts: timestamp
2857
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2858

2859
  @rtype: string
2860
  @return: a string with the formatted timestamp
2861

2862
  """
2863
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2864
    return "?"
2865
  sec, usec = ts
2866
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
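# Illustrative behaviour (added sketch, not part of the original module):
#   FormatTimestamp((1325376000, 123456)) -> "2012-01-01 00:00:00.123456"
# (the date/time part depends on the local timezone); anything that is not a
# two-element sequence is formatted as "?".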
2867

    
2868

    
2869
def ParseTimespec(value):
2870
  """Parse a time specification.
2871

2872
  The following suffixes will be recognized:
2873

2874
    - s: seconds
2875
    - m: minutes
2876
    - h: hours
2877
    - d: days
2878
    - w: weeks
2879

2880
  Without any suffix, the value will be taken to be in seconds.
2881

2882
  """
2883
  value = str(value)
2884
  if not value:
2885
    raise errors.OpPrereqError("Empty time specification passed")
2886
  suffix_map = {
2887
    "s": 1,
2888
    "m": 60,
2889
    "h": 3600,
2890
    "d": 86400,
2891
    "w": 604800,
2892
    }
2893
  if value[-1] not in suffix_map:
2894
    try:
2895
      value = int(value)
2896
    except (TypeError, ValueError):
2897
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2898
  else:
2899
    multiplier = suffix_map[value[-1]]
2900
    value = value[:-1]
2901
    if not value: # no data left after stripping the suffix
2902
      raise errors.OpPrereqError("Invalid time specification (only"
2903
                                 " suffix passed)")
2904
    try:
2905
      value = int(value) * multiplier
2906
    except (TypeError, ValueError):
2907
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2908
  return value
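# Illustrative behaviour (added sketch, not part of the original module):
#   ParseTimespec("30") -> 30        (no suffix: taken as seconds)
#   ParseTimespec("2h") -> 7200
#   ParseTimespec("1w") -> 604800
# while "" or a bare suffix such as "h" raise errors.OpPrereqError.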
2909

    
2910

    
2911
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2912
                   filter_master=False, nodegroup=None):
2913
  """Returns the names of online nodes.
2914

2915
  This function will also log a note on stderr with the names of
  the offline nodes that are being skipped.
2917

2918
  @param nodes: if not empty, use only this subset of nodes (minus the
2919
      offline ones)
2920
  @param cl: if not None, luxi client to use
2921
  @type nowarn: boolean
2922
  @param nowarn: by default, this function will output a note with the
2923
      offline nodes that are skipped; if this parameter is True the
2924
      note is not displayed
2925
  @type secondary_ips: boolean
2926
  @param secondary_ips: if True, return the secondary IPs instead of the
2927
      names, useful for doing network traffic over the replication interface
2928
      (if any)
2929
  @type filter_master: boolean
2930
  @param filter_master: if True, do not return the master node in the list
2931
      (useful in coordination with secondary_ips where we cannot check our
2932
      node name against the list)
2933
  @type nodegroup: string
2934
  @param nodegroup: If set, only return nodes in this node group
2935

2936
  """
2937
  if cl is None:
2938
    cl = GetClient()
2939

    
2940
  qfilter = []
2941

    
2942
  if nodes:
2943
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
2944

    
2945
  if nodegroup is not None:
2946
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
2947
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
2948

    
2949
  if filter_master:
2950
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
2951

    
2952
  if qfilter:
2953
    if len(qfilter) > 1:
2954
      final_filter = [qlang.OP_AND] + qfilter
2955
    else:
2956
      assert len(qfilter) == 1
2957
      final_filter = qfilter[0]
2958
  else:
2959
    final_filter = None
2960

    
2961
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
2962

    
2963
  def _IsOffline(row):
2964
    (_, (_, offline), _) = row
2965
    return offline
2966

    
2967
  def _GetName(row):
2968
    ((_, name), _, _) = row
2969
    return name
2970

    
2971
  def _GetSip(row):
2972
    (_, _, (_, sip)) = row
2973
    return sip
2974

    
2975
  (offline, online) = compat.partition(result.data, _IsOffline)
2976

    
2977
  if offline and not nowarn:
2978
    ToStderr("Note: skipping offline node(s): %s" %
2979
             utils.CommaJoin(map(_GetName, offline)))
2980

    
2981
  if secondary_ips:
2982
    fn = _GetSip
2983
  else:
2984
    fn = _GetName
2985

    
2986
  return map(fn, online)
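
# Example (a sketch, not part of the original module; the node group name is
# hypothetical):
#   GetOnlineNodes([])                         # names of all online nodes
#   GetOnlineNodes([], nodegroup="group1")     # restrict to one node group
#   GetOnlineNodes([], secondary_ips=True,
#                  filter_master=True)         # secondary IPs, minus master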


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
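
# Example (a sketch, not part of the original module; the variable names below
# are placeholders): both wrappers accept optional %-style arguments that are
# interpolated into the message:
#   ToStdout("Cluster verified")
#   ToStdout("Processed %d of %d jobs", done, total)
#   ToStderr("Failure on node %s: %s", node, err)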


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
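
# Example usage of JobExecutor (a sketch, not part of the original module;
# "op1"/"op2" stand for opcode objects built by the caller and "opts" for the
# parsed command-line options):
#   jex = JobExecutor(opts=opts, verbose=False)
#   jex.QueueJob("instance1", op1)
#   jex.QueueJob("instance2", op2)
#   for (success, result) in jex.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)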


def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the explicitly set (non-default) parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
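
# Example (a sketch, not part of the original module):
#   buf = StringIO()
#   FormatParameterDict(buf, {"mem": 512}, {"mem": 512, "vcpus": 1})
#   # buf.getvalue() is now:
#   #   "  - mem: 512\n  - vcpus: default (1)\n"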


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
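
# Example (a sketch, not part of the original module; the instance names are
# hypothetical):
#   if not ConfirmOperation(["inst1", "inst2"], "instances", "shutdown"):
#     return constants.EXIT_FAILURE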