Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ 8d99a8bf

History | View | Annotate | Download (100.9 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
import errno
31
import itertools
32
from cStringIO import StringIO
33

    
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import constants
37
from ganeti import opcodes
38
from ganeti import luxi
39
from ganeti import ssconf
40
from ganeti import rpc
41
from ganeti import ssh
42
from ganeti import compat
43
from ganeti import netutils
44
from ganeti import qlang
45

    
46
from optparse import (OptionParser, TitledHelpFormatter,
47
                      Option, OptionValueError)
48

    
49

    
50
__all__ = [
51
  # Command line options
52
  "ADD_UIDS_OPT",
53
  "ALLOCATABLE_OPT",
54
  "ALLOC_POLICY_OPT",
55
  "ALL_OPT",
56
  "ALLOW_FAILOVER_OPT",
57
  "AUTO_PROMOTE_OPT",
58
  "AUTO_REPLACE_OPT",
59
  "BACKEND_OPT",
60
  "BLK_OS_OPT",
61
  "CAPAB_MASTER_OPT",
62
  "CAPAB_VM_OPT",
63
  "CLEANUP_OPT",
64
  "CLUSTER_DOMAIN_SECRET_OPT",
65
  "CONFIRM_OPT",
66
  "CP_SIZE_OPT",
67
  "DEBUG_OPT",
68
  "DEBUG_SIMERR_OPT",
69
  "DISKIDX_OPT",
70
  "DISK_OPT",
71
  "DISK_TEMPLATE_OPT",
72
  "DRAINED_OPT",
73
  "DRY_RUN_OPT",
74
  "DRBD_HELPER_OPT",
75
  "DST_NODE_OPT",
76
  "EARLY_RELEASE_OPT",
77
  "ENABLED_HV_OPT",
78
  "ERROR_CODES_OPT",
79
  "FIELDS_OPT",
80
  "FILESTORE_DIR_OPT",
81
  "FILESTORE_DRIVER_OPT",
82
  "FORCE_FILTER_OPT",
83
  "FORCE_OPT",
84
  "FORCE_VARIANT_OPT",
85
  "GLOBAL_FILEDIR_OPT",
86
  "HID_OS_OPT",
87
  "GLOBAL_SHARED_FILEDIR_OPT",
88
  "HVLIST_OPT",
89
  "HVOPTS_OPT",
90
  "HYPERVISOR_OPT",
91
  "IALLOCATOR_OPT",
92
  "DEFAULT_IALLOCATOR_OPT",
93
  "IDENTIFY_DEFAULTS_OPT",
94
  "IGNORE_CONSIST_OPT",
95
  "IGNORE_FAILURES_OPT",
96
  "IGNORE_OFFLINE_OPT",
97
  "IGNORE_REMOVE_FAILURES_OPT",
98
  "IGNORE_SECONDARIES_OPT",
99
  "IGNORE_SIZE_OPT",
100
  "INTERVAL_OPT",
101
  "MAC_PREFIX_OPT",
102
  "MAINTAIN_NODE_HEALTH_OPT",
103
  "MASTER_NETDEV_OPT",
104
  "MC_OPT",
105
  "MIGRATION_MODE_OPT",
106
  "NET_OPT",
107
  "NEW_CLUSTER_CERT_OPT",
108
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
109
  "NEW_CONFD_HMAC_KEY_OPT",
110
  "NEW_RAPI_CERT_OPT",
111
  "NEW_SECONDARY_OPT",
112
  "NIC_PARAMS_OPT",
113
  "NODE_FORCE_JOIN_OPT",
114
  "NODE_LIST_OPT",
115
  "NODE_PLACEMENT_OPT",
116
  "NODEGROUP_OPT",
117
  "NODE_PARAMS_OPT",
118
  "NODE_POWERED_OPT",
119
  "NODRBD_STORAGE_OPT",
120
  "NOHDR_OPT",
121
  "NOIPCHECK_OPT",
122
  "NO_INSTALL_OPT",
123
  "NONAMECHECK_OPT",
124
  "NOLVM_STORAGE_OPT",
125
  "NOMODIFY_ETCHOSTS_OPT",
126
  "NOMODIFY_SSH_SETUP_OPT",
127
  "NONICS_OPT",
128
  "NONLIVE_OPT",
129
  "NONPLUS1_OPT",
130
  "NOSHUTDOWN_OPT",
131
  "NOSTART_OPT",
132
  "NOSSH_KEYCHECK_OPT",
133
  "NOVOTING_OPT",
134
  "NO_REMEMBER_OPT",
135
  "NWSYNC_OPT",
136
  "ON_PRIMARY_OPT",
137
  "ON_SECONDARY_OPT",
138
  "OFFLINE_OPT",
139
  "OSPARAMS_OPT",
140
  "OS_OPT",
141
  "OS_SIZE_OPT",
142
  "OOB_TIMEOUT_OPT",
143
  "POWER_DELAY_OPT",
144
  "PREALLOC_WIPE_DISKS_OPT",
145
  "PRIMARY_IP_VERSION_OPT",
146
  "PRIORITY_OPT",
147
  "RAPI_CERT_OPT",
148
  "READD_OPT",
149
  "REBOOT_TYPE_OPT",
150
  "REMOVE_INSTANCE_OPT",
151
  "REMOVE_UIDS_OPT",
152
  "RESERVED_LVS_OPT",
153
  "ROMAN_OPT",
154
  "SECONDARY_IP_OPT",
155
  "SELECT_OS_OPT",
156
  "SEP_OPT",
157
  "SHOWCMD_OPT",
158
  "SHUTDOWN_TIMEOUT_OPT",
159
  "SINGLE_NODE_OPT",
160
  "SRC_DIR_OPT",
161
  "SRC_NODE_OPT",
162
  "SUBMIT_OPT",
163
  "STATIC_OPT",
164
  "SYNC_OPT",
165
  "TAG_SRC_OPT",
166
  "TIMEOUT_OPT",
167
  "UIDPOOL_OPT",
168
  "USEUNITS_OPT",
169
  "USE_REPL_NET_OPT",
170
  "VERBOSE_OPT",
171
  "VG_NAME_OPT",
172
  "YES_DOIT_OPT",
173
  # Generic functions for CLI programs
174
  "ConfirmOperation",
175
  "GenericMain",
176
  "GenericInstanceCreate",
177
  "GenericList",
178
  "GenericListFields",
179
  "GetClient",
180
  "GetOnlineNodes",
181
  "JobExecutor",
182
  "JobSubmittedException",
183
  "ParseTimespec",
184
  "RunWhileClusterStopped",
185
  "SubmitOpCode",
186
  "SubmitOrSend",
187
  "UsesRPC",
188
  # Formatting functions
189
  "ToStderr", "ToStdout",
190
  "FormatError",
191
  "FormatQueryResult",
192
  "FormatParameterDict",
193
  "GenerateTable",
194
  "AskUser",
195
  "FormatTimestamp",
196
  "FormatLogMessage",
197
  # Tags functions
198
  "ListTags",
199
  "AddTags",
200
  "RemoveTags",
201
  # command line options support infrastructure
202
  "ARGS_MANY_INSTANCES",
203
  "ARGS_MANY_NODES",
204
  "ARGS_MANY_GROUPS",
205
  "ARGS_NONE",
206
  "ARGS_ONE_INSTANCE",
207
  "ARGS_ONE_NODE",
208
  "ARGS_ONE_GROUP",
209
  "ARGS_ONE_OS",
210
  "ArgChoice",
211
  "ArgCommand",
212
  "ArgFile",
213
  "ArgGroup",
214
  "ArgHost",
215
  "ArgInstance",
216
  "ArgJobId",
217
  "ArgNode",
218
  "ArgOs",
219
  "ArgSuggest",
220
  "ArgUnknown",
221
  "OPT_COMPL_INST_ADD_NODES",
222
  "OPT_COMPL_MANY_NODES",
223
  "OPT_COMPL_ONE_IALLOCATOR",
224
  "OPT_COMPL_ONE_INSTANCE",
225
  "OPT_COMPL_ONE_NODE",
226
  "OPT_COMPL_ONE_NODEGROUP",
227
  "OPT_COMPL_ONE_OS",
228
  "cli_option",
229
  "SplitNodeOption",
230
  "CalculateOSNames",
231
  "ParseFields",
232
  "COMMON_CREATE_OPTS",
233
  ]
234

    
235
#: Prefix that marks a bare key as "disabled" in key=val option strings
#: (see L{_SplitKeyVal}: "no_foo" yields foo=False)
NO_PREFIX = "no_"
#: Prefix that maps a bare key to None in key=val option strings
#: (see L{_SplitKeyVal}: "-foo" yields foo=None)
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
254

    
255

    
256
class _Argument:
257
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
258
    self.min = min
259
    self.max = max
260

    
261
  def __repr__(self):
262
    return ("<%s min=%s max=%s>" %
263
            (self.__class__.__name__, self.min, self.max))
264

    
265

    
266
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor; the choices
  are only suggestions, not enforced.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # list of suggested values (or None when there are no suggestions)
    self.choices = choices

  def __repr__(self):
    return "<%s min=%s max=%s choices=%r>" % (self.__class__.__name__,
                                              self.min, self.max,
                                              self.choices)
280

    
281

    
282
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
289

    
290

    
291
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
295

    
296

    
297
class ArgInstance(_Argument):
  """Instances argument.

  """
301

    
302

    
303
class ArgNode(_Argument):
  """Node argument.

  """
307

    
308

    
309
class ArgGroup(_Argument):
  """Node group argument.

  """
313

    
314

    
315
class ArgJobId(_Argument):
  """Job ID argument.

  """
319

    
320

    
321
class ArgFile(_Argument):
  """File path argument.

  """
325

    
326

    
327
class ArgCommand(_Argument):
  """Command argument.

  """
331

    
332

    
333
class ArgHost(_Argument):
  """Host argument.

  """
337

    
338

    
339
class ArgOs(_Argument):
  """OS argument.

  """
343

    
344

    
345
ARGS_NONE = []
346
ARGS_MANY_INSTANCES = [ArgInstance()]
347
ARGS_MANY_NODES = [ArgNode()]
348
ARGS_MANY_GROUPS = [ArgGroup()]
349
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
350
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
351
# TODO
352
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
353
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
354

    
355

    
356
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter: for node,
  node group and instance tags the object name is popped off the front
  of the argument list.

  @return: tuple of (tag kind, object name)

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, its "name" is the kind itself
    return kind, kind
  if kind in (constants.TAG_NODEGROUP,
              constants.TAG_NODE,
              constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
377

    
378

    
379
def _ExtendTags(opts, args):
380
  """Extend the args if a source file has been given.
381

382
  This function will extend the tags with the contents of the file
383
  passed in the 'tags_source' attribute of the opts parameter. A file
384
  named '-' will be replaced by stdin.
385

386
  """
387
  fname = opts.tags_source
388
  if fname is None:
389
    return
390
  if fname == "-":
391
    new_fh = sys.stdin
392
  else:
393
    new_fh = open(fname, "r")
394
  new_data = []
395
  try:
396
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
397
    # because of python bug 1633941
398
    while True:
399
      line = new_fh.readline()
400
      if not line:
401
        break
402
      new_data.append(line.strip())
403
  finally:
404
    new_fh.close()
405
  args.extend(new_data)
406

    
407

    
408
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  cl = GetClient()
  # print the tags one per line, in sorted order
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
424

    
425

    
426
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  # pull in extra tags from the --from file, if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
441

    
442

    
443
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  # pull in extra tags from the --from file, if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
458

    
459

    
460
def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  Delegates to L{utils.ParseUnit} and re-raises parse failures as
  L{OptionValueError} so that optparse reports them as option errors.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
468

    
469

    
470
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  parsed = {}
  if not data:
    return parsed
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      (key, val) = elem.split("=", 1)
    elif elem.startswith(NO_PREFIX):
      # bare "no_foo" means foo=False
      (key, val) = (elem[len(NO_PREFIX):], False)
    elif elem.startswith(UN_PREFIX):
      # bare "-foo" maps foo to None
      (key, val) = (elem[len(UN_PREFIX):], None)
    else:
      # bare "foo" means foo=True
      (key, val) = (elem, True)
    if key in parsed:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    parsed[key] = val
  return parsed
505

    
506

    
507
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" in value:
    (ident, rest) = value.split(":", 1)
  else:
    (ident, rest) = (value, '')

  # an ident prefixed with "no_" (-> False) or "-" (-> None) removes the
  # whole parameter group and therefore may not carry any options
  for (prefix, flag) in ((NO_PREFIX, False), (UN_PREFIX, None)):
    if ident.startswith(prefix):
      if rest:
        msg = "Cannot pass options when removing parameter groups: %s" % value
        raise errors.ParameterError(msg)
      return (ident[len(prefix):], flag)

  return (ident, _SplitKeyVal(opt, rest))
533

    
534

    
535
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}; see
  L{_SplitKeyVal} for the handling of the "no_" and "-" key prefixes.

  """
  return _SplitKeyVal(opt, value)
542

    
543

    
544
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  # comparison is case-insensitive; anything else is rejected
  if value in (constants.VALUE_FALSE, "no"):
    return False
  if value in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
557

    
558

    
559
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: All dynamic completion markers defined above
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
578

    
579

    
580
class CliOption(Option):
  """Custom option class for optparse.

  Extends the standard optparse C{Option} with a "completion_suggest"
  attribute and with the extra value types handled by the check_*
  converter functions defined in this module.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
598

    
599

    
600
# optparse.py sets make_option, so we do it for our own option class, too
601
cli_option = CliOption
602

    
603

    
604
_YORNO = "yes|no"
605

    
606
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
607
                       help="Increase debugging level")
608

    
609
NOHDR_OPT = cli_option("--no-headers", default=False,
610
                       action="store_true", dest="no_headers",
611
                       help="Don't display column headers")
612

    
613
SEP_OPT = cli_option("--separator", default=None,
614
                     action="store", dest="separator",
615
                     help=("Separator between output fields"
616
                           " (defaults to one space)"))
617

    
618
USEUNITS_OPT = cli_option("--units", default=None,
619
                          dest="units", choices=('h', 'm', 'g', 't'),
620
                          help="Specify units for output (one of h/m/g/t)")
621

    
622
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
623
                        type="string", metavar="FIELDS",
624
                        help="Comma separated list of output fields")
625

    
626
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
627
                       default=False, help="Force the operation")
628

    
629
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
630
                         default=False, help="Do not require confirmation")
631

    
632
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
633
                                  action="store_true", default=False,
634
                                  help=("Ignore offline nodes and do as much"
635
                                        " as possible"))
636

    
637
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
638
                         default=None, help="File with tag names")
639

    
640
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
641
                        default=False, action="store_true",
642
                        help=("Submit the job and return the job ID, but"
643
                              " don't wait for the job to finish"))
644

    
645
SYNC_OPT = cli_option("--sync", dest="do_locking",
646
                      default=False, action="store_true",
647
                      help=("Grab locks while doing the queries"
648
                            " in order to ensure more consistent results"))
649

    
650
# fixed help-text typo: "verify it it could be" -> "verify if it could be"
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
655

    
656
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
657
                         action="store_true",
658
                         help="Increase the verbosity of the operation")
659

    
660
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
661
                              action="store_true", dest="simulate_errors",
662
                              help="Debugging option that makes the operation"
663
                              " treat most runtime checks as failed")
664

    
665
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
666
                        default=True, action="store_false",
667
                        help="Don't wait for sync (DANGEROUS!)")
668

    
669
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
670
                               help=("Custom disk setup (%s)" %
671
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
672
                               default=None, metavar="TEMPL",
673
                               choices=list(constants.DISK_TEMPLATES))
674

    
675
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
676
                        help="Do not create any network cards for"
677
                        " the instance")
678

    
679
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
680
                               help="Relative path under default cluster-wide"
681
                               " file storage dir to store file-based disks",
682
                               default=None, metavar="<DIR>")
683

    
684
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
685
                                  help="Driver to use for image files",
686
                                  default="loop", metavar="<DRIVER>",
687
                                  choices=list(constants.FILE_DRIVER))
688

    
689
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
690
                            help="Select nodes for the instance automatically"
691
                            " using the <NAME> iallocator plugin",
692
                            default=None, type="string",
693
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
694

    
695
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
696
                            metavar="<NAME>",
697
                            help="Set the default instance allocator plugin",
698
                            default=None, type="string",
699
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
700

    
701
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
702
                    metavar="<os>",
703
                    completion_suggest=OPT_COMPL_ONE_OS)
704

    
705
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
706
                         type="keyval", default={},
707
                         help="OS parameters")
708

    
709
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
710
                               action="store_true", default=False,
711
                               help="Force an unknown variant")
712

    
713
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
714
                            action="store_true", default=False,
715
                            help="Do not install the OS (will"
716
                            " enable no-start)")
717

    
718
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
719
                         type="keyval", default={},
720
                         help="Backend parameters")
721

    
722
HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
723
                         default={}, dest="hvparams",
724
                         help="Hypervisor parameters")
725

    
726
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
727
                            help="Hypervisor and hypervisor options, in the"
728
                            " format hypervisor:option=value,option=value,...",
729
                            default=None, type="identkeyval")
730

    
731
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
732
                        help="Hypervisor and hypervisor options, in the"
733
                        " format hypervisor:option=value,option=value,...",
734
                        default=[], action="append", type="identkeyval")
735

    
736
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
737
                           action="store_false",
738
                           help="Don't check that the instance's IP"
739
                           " is alive")
740

    
741
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
742
                             default=True, action="store_false",
743
                             help="Don't check that the instance's name"
744
                             " is resolvable")
745

    
746
NET_OPT = cli_option("--net",
747
                     help="NIC parameters", default=[],
748
                     dest="nics", action="append", type="identkeyval")
749

    
750
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
751
                      dest="disks", action="append", type="identkeyval")
752

    
753
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
754
                         help="Comma-separated list of disks"
755
                         " indices to act on (e.g. 0,2) (optional,"
756
                         " defaults to all disks)")
757

    
758
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
759
                         help="Enforces a single-disk configuration using the"
760
                         " given disk size, in MiB unless a suffix is used",
761
                         default=None, type="unit", metavar="<size>")
762

    
763
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
764
                                dest="ignore_consistency",
765
                                action="store_true", default=False,
766
                                help="Ignore the consistency of the disks on"
767
                                " the secondary")
768

    
769
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
770
                                dest="allow_failover",
771
                                action="store_true", default=False,
772
                                help="If migration is not possible fallback to"
773
                                     " failover")
774

    
775
NONLIVE_OPT = cli_option("--non-live", dest="live",
776
                         default=True, action="store_false",
777
                         help="Do a non-live migration (this usually means"
778
                         " freeze the instance, save the state, transfer and"
779
                         " only then resume running on the secondary node)")
780

    
781
# fixed help text: the opening parenthesis was never closed
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
786

    
787
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
788
                                help="Target node and optional secondary node",
789
                                metavar="<pnode>[:<snode>]",
790
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
791

    
792
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
793
                           action="append", metavar="<node>",
794
                           help="Use only this node (can be used multiple"
795
                           " times, if not given defaults to all nodes)",
796
                           completion_suggest=OPT_COMPL_ONE_NODE)
797

    
798
NODEGROUP_OPT = cli_option("-g", "--node-group",
799
                           dest="nodegroup",
800
                           help="Node group (name or uuid)",
801
                           metavar="<nodegroup>",
802
                           default=None, type="string",
803
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
804

    
805
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
806
                             metavar="<node>",
807
                             completion_suggest=OPT_COMPL_ONE_NODE)
808

    
809
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
810
                         action="store_false",
811
                         help="Don't start the instance after creation")
812

    
813
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
814
                         action="store_true", default=False,
815
                         help="Show command instead of executing it")
816

    
817
# fixed help text: removed the doubled space in "traffic and  disrupt" and
# closed the unbalanced parenthesis at the end
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")
825

    
826
STATIC_OPT = cli_option("-s", "--static", dest="static",
827
                        action="store_true", default=False,
828
                        help="Only show configuration data, not runtime data")
829

    
830
ALL_OPT = cli_option("--all", dest="show_all",
831
                     default=False, action="store_true",
832
                     help="Show info on all instances on the cluster."
833
                     " This can take a long time to run, use wisely")
834

    
835
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
836
                           action="store_true", default=False,
837
                           help="Interactive OS reinstall, lists available"
838
                           " OS templates for selection")
839

    
840
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
841
                                 action="store_true", default=False,
842
                                 help="Remove the instance from the cluster"
843
                                 " configuration even if there are failures"
844
                                 " during the removal process")
845

    
846
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
847
                                        dest="ignore_remove_failures",
848
                                        action="store_true", default=False,
849
                                        help="Remove the instance from the"
850
                                        " cluster configuration even if there"
851
                                        " are failures during the removal"
852
                                        " process")
853

    
854
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
855
                                 action="store_true", default=False,
856
                                 help="Remove the instance from the cluster")
857

    
858
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
859
                               help="Specifies the new node for the instance",
860
                               metavar="NODE", default=None,
861
                               completion_suggest=OPT_COMPL_ONE_NODE)
862

    
863
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
864
                               help="Specifies the new secondary node",
865
                               metavar="NODE", default=None,
866
                               completion_suggest=OPT_COMPL_ONE_NODE)
867

    
868
# Options for disk replacement/activation.

ON_PRIMARY_OPT = cli_option(
  "-p", "--on-primary", dest="on_primary",
  default=False, action="store_true",
  help=("Replace the disk(s) on the primary node (applies only to"
        " internally mirrored disk templates, e.g. %s)" %
        utils.CommaJoin(constants.DTS_INT_MIRROR)))

ON_SECONDARY_OPT = cli_option(
  "-s", "--on-secondary", dest="on_secondary",
  default=False, action="store_true",
  help=("Replace the disk(s) on the secondary node (applies only to"
        " internally mirrored disk templates, e.g. %s)" %
        utils.CommaJoin(constants.DTS_INT_MIRROR)))

AUTO_PROMOTE_OPT = cli_option(
  "--auto-promote", dest="auto_promote",
  default=False, action="store_true",
  help="Lock all nodes and auto-promote as needed to MC status")

AUTO_REPLACE_OPT = cli_option(
  "-a", "--auto", dest="auto",
  default=False, action="store_true",
  help=("Automatically replace faulty disks (applies only to"
        " internally mirrored disk templates, e.g. %s)" %
        utils.CommaJoin(constants.DTS_INT_MIRROR)))

IGNORE_SIZE_OPT = cli_option(
  "--ignore-size", dest="ignore_size",
  default=False, action="store_true",
  help=("Ignore current recorded size (useful for forcing activation when"
        " the recorded size is wrong)"))
# Options for importing instances and adding nodes.

SRC_NODE_OPT = cli_option(
  "--src-node", dest="src_node", help="Source node",
  metavar="<node>", completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option(
  "--src-dir", dest="src_dir", help="Source directory",
  metavar="<dir>")

SECONDARY_IP_OPT = cli_option(
  "-s", "--secondary-ip", dest="secondary_ip",
  help="Specify the secondary ip for the node",
  metavar="ADDRESS", default=None)

READD_OPT = cli_option(
  "--readd", dest="readd",
  default=False, action="store_true",
  help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option(
  "--no-ssh-key-check", dest="ssh_key_check",
  default=True, action="store_false",
  help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option(
  "--force-join", dest="force_join",
  default=False, action="store_true",
  help="Force the joining of a node")
# Per-node (and per-volume) flag options; all default to None so that
# "not given" can be distinguished from an explicit yes/no.

MC_OPT = cli_option(
  "-C", "--master-candidate", dest="master_candidate",
  type="bool", default=None, metavar=_YORNO,
  help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option(
  "-O", "--offline", dest="offline", metavar=_YORNO,
  type="bool", default=None,
  help=("Set the offline flag on the node (cluster does not communicate"
        " with offline nodes)"))

DRAINED_OPT = cli_option(
  "-D", "--drained", dest="drained", metavar=_YORNO,
  type="bool", default=None,
  help=("Set the drained flag on the node (excluded from allocation"
        " operations)"))

CAPAB_MASTER_OPT = cli_option(
  "--master-capable", dest="master_capable",
  type="bool", default=None, metavar=_YORNO,
  help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option(
  "--vm-capable", dest="vm_capable",
  type="bool", default=None, metavar=_YORNO,
  help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option(
  "--allocatable", dest="allocatable",
  type="bool", default=None, metavar=_YORNO,
  help="Set the allocatable flag on a volume")
# Cluster-wide storage and hypervisor configuration options.

NOLVM_STORAGE_OPT = cli_option(
  "--no-lvm-storage", dest="lvm_storage",
  help="Disable support for lvm based instances (cluster-wide)",
  action="store_false", default=True)

ENABLED_HV_OPT = cli_option(
  "--enabled-hypervisors", dest="enabled_hypervisors",
  help="Comma-separated list of hypervisors",
  type="string", default=None)

# NOTE(review): the mutable default ({}) is shared between parses — confirm
# that the "keyval" option type never mutates it in place.
NIC_PARAMS_OPT = cli_option(
  "-N", "--nic-parameters", dest="nicparams",
  type="keyval", default={},
  help="NIC parameters")

CP_SIZE_OPT = cli_option(
  "-C", "--candidate-pool-size", default=None,
  dest="candidate_pool_size", type="int",
  help="Set the candidate pool size")

VG_NAME_OPT = cli_option(
  "--vg-name", dest="vg_name",
  help=("Enables LVM and specifies the volume group name (cluster-wide)"
        " for disk allocation [%s]" % constants.DEFAULT_VG),
  metavar="VG", default=None)
# Cluster init/destroy options.

YES_DOIT_OPT = cli_option(
  "--yes-do-it", dest="yes_do_it",
  help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option(
  "--no-voting", dest="no_voting",
  help="Skip node agreement check (dangerous)",
  action="store_true", default=False)

MAC_PREFIX_OPT = cli_option(
  "-m", "--mac-prefix", dest="mac_prefix",
  help=("Specify the mac prefix for the instance IP addresses, in the"
        " format XX:XX:XX"),
  metavar="PREFIX", default=None)

MASTER_NETDEV_OPT = cli_option(
  "--master-netdev", dest="master_netdev",
  help=("Specify the node interface (cluster-wide) on which the master IP"
        " address will be added (cluster init default: %s)" %
        constants.DEFAULT_BRIDGE),
  metavar="NETDEV", default=None)
# File-storage directories and cluster-verify tweaks.

GLOBAL_FILEDIR_OPT = cli_option(
  "--file-storage-dir", dest="file_storage_dir",
  help=("Specify the default directory (cluster-wide) for storing the"
        " file-based disks [%s]" % constants.DEFAULT_FILE_STORAGE_DIR),
  metavar="DIR",
  default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir", dest="shared_file_storage_dir",
  help=("Specify the default directory (cluster-wide) for storing the"
        " shared file-based disks [%s]" %
        constants.DEFAULT_SHARED_FILE_STORAGE_DIR),
  metavar="SHAREDDIR",
  default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option(
  "--no-etc-hosts", dest="modify_etc_hosts",
  help="Don't modify /etc/hosts",
  action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option(
  "--no-ssh-init", dest="modify_ssh_setup",
  help="Don't initialize SSH keys",
  action="store_false", default=True)

ERROR_CODES_OPT = cli_option(
  "--error-codes", dest="error_codes",
  help="Enable parseable error messages",
  action="store_true", default=False)

NONPLUS1_OPT = cli_option(
  "--no-nplus1-mem", dest="skip_nplusone_mem",
  help="Skip N+1 memory redundancy tests",
  action="store_true", default=False)
# Reboot/shutdown behaviour options.

REBOOT_TYPE_OPT = cli_option(
  "-t", "--type", dest="reboot_type",
  help="Type of reboot: soft/hard/full",
  default=constants.INSTANCE_REBOOT_HARD,
  metavar="<REBOOT>",
  choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option(
  "--ignore-secondaries", dest="ignore_secondaries",
  default=False, action="store_true",
  help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option(
  "--noshutdown", dest="shutdown",
  action="store_false", default=True,
  help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option(
  "--timeout", dest="timeout", type="int",
  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
  help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option(
  "--shutdown-timeout", dest="shutdown_timeout", type="int",
  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
  help="Maximum time to wait for instance shutdown")

# Fixed user-visible typo: "repetions" -> "repetitions".
INTERVAL_OPT = cli_option(
  "--interval", dest="interval", type="int",
  default=None,
  help="Number of seconds between repetitions of the command")
# Lock-release and certificate-renewal options.

EARLY_RELEASE_OPT = cli_option(
  "--early-release", dest="early_release",
  default=False, action="store_true",
  help="Release the locks on the secondary node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option(
  "--new-cluster-certificate", dest="new_cluster_cert",
  default=False, action="store_true",
  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option(
  "--rapi-certificate", dest="rapi_cert",
  default=None,
  help="File containing new RAPI certificate")

# NOTE(review): default=None (not False) despite action="store_true";
# both are falsy, but this is inconsistent with the other boolean flags —
# confirm no caller tests "is None" before normalizing.
NEW_RAPI_CERT_OPT = cli_option(
  "--new-rapi-certificate", dest="new_rapi_cert",
  default=None, action="store_true",
  help="Generate a new self-signed RAPI certificate")

NEW_CONFD_HMAC_KEY_OPT = cli_option(
  "--new-confd-hmac-key", dest="new_confd_hmac_key",
  default=False, action="store_true",
  help="Create a new HMAC key for %s" % constants.CONFD)
# Fixed duplicated word in the help text ("new new" -> "new").
CLUSTER_DOMAIN_SECRET_OPT = cli_option(
  "--cluster-domain-secret", dest="cluster_domain_secret",
  default=None,
  help="Load new cluster domain secret from file")
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option(
  "--new-cluster-domain-secret", dest="new_cluster_domain_secret",
  default=False, action="store_true",
  help="Create a new cluster domain secret")

USE_REPL_NET_OPT = cli_option(
  "--use-replication-network", dest="use_replication_network",
  help="Whether to use the replication network for talking to the nodes",
  action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = cli_option(
  "--maintain-node-health", dest="maintain_node_health",
  metavar=_YORNO, default=None, type="bool",
  help=("Configure the cluster to automatically maintain node health, by"
        " shutting down unknown instances, shutting down unknown DRBD"
        " devices, etc."))

IDENTIFY_DEFAULTS_OPT = cli_option(
  "--identify-defaults", dest="identify_defaults",
  default=False, action="store_true",
  help=("Identify which saved instance parameters are equal to the current"
        " cluster defaults and set them as such, instead of marking them as"
        " overridden"))
# User-id pool management options.

UIDPOOL_OPT = cli_option(
  "--uid-pool", default=None,
  action="store", dest="uid_pool",
  help="A list of user-ids or user-id ranges separated by commas")

ADD_UIDS_OPT = cli_option(
  "--add-uids", default=None,
  action="store", dest="add_uids",
  help=("A list of user-ids or user-id ranges separated by commas, to be"
        " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option(
  "--remove-uids", default=None,
  action="store", dest="remove_uids",
  help=("A list of user-ids or user-id ranges separated by commas, to be"
        " removed from the user-id pool"))

# Fixed grammar in the help text ("volumes names" -> "volume names") and
# the continuation indentation of the original declaration.
RESERVED_LVS_OPT = cli_option(
  "--reserved-lvs", default=None,
  action="store", dest="reserved_lvs",
  help=("A comma-separated list of reserved logical volume names, that"
        " will be ignored by cluster verify"))

ROMAN_OPT = cli_option(
  "--roman", dest="roman_integers",
  default=False, action="store_true",
  help="Use roman numbers for positive integers")
# DRBD, IP-version, priority and OS-flag options.

DRBD_HELPER_OPT = cli_option(
  "--drbd-usermode-helper", dest="drbd_helper",
  action="store", default=None,
  help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option(
  "--no-drbd-storage", dest="drbd_storage",
  action="store_false", default=True,
  help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = cli_option(
  "--primary-ip-version", default=constants.IP4_VERSION,
  action="store", dest="primary_ip_version",
  metavar="%d|%d" % (constants.IP4_VERSION, constants.IP6_VERSION),
  help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option(
  "--priority", default=None, dest="priority",
  metavar="|".join(name for name, _ in _PRIORITY_NAMES),
  choices=_PRIONAME_TO_VALUE.keys(),
  help="Priority for opcode processing")

HID_OS_OPT = cli_option(
  "--hidden", dest="hidden",
  type="bool", default=None, metavar=_YORNO,
  help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option(
  "--blacklisted", dest="blacklisted",
  type="bool", default=None, metavar=_YORNO,
  help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option(
  "--prealloc-wipe-disks", default=None,
  type="bool", metavar=_YORNO,
  dest="prealloc_wipe_disks",
  help="Wipe disks prior to instance creation")
# Node-group parameters, out-of-band and miscellaneous options.

NODE_PARAMS_OPT = cli_option(
  "--node-parameters", dest="ndparams",
  type="keyval", default=None,
  help="Node parameters")

ALLOC_POLICY_OPT = cli_option(
  "--alloc-policy", dest="alloc_policy",
  action="store", metavar="POLICY", default=None,
  help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option(
  "--node-powered", default=None,
  type="bool", metavar=_YORNO,
  dest="node_powered",
  help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option(
  "--oob-timeout", dest="oob_timeout", type="int",
  default=constants.OOB_TIMEOUT,
  help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option(
  "--power-delay", dest="power_delay", type="float",
  default=constants.OOB_POWER_DELAY,
  help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option(
  "-F", "--filter", dest="force_filter",
  action="store_true", default=False,
  help="Whether command argument should be treated as filter")

NO_REMEMBER_OPT = cli_option(
  "--no-remember", dest="no_remember",
  action="store_true", default=False,
  help="Perform but do not record the change in the configuration")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

#: Common options for instance creation; "add" and "import" extend this
#: list with their own specific options.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if argv:
    binary = argv[0].split("/")[-1]
  else:
    binary = "<command>"

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # Unknown or missing command: print a friendly usage summary instead
    sortedcmds = sorted(commands.keys())

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = min(60, max(len(" %s" % cmd) for cmd in commands))

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_lines = textwrap.wrap(commands[cmd][4], 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  # NOTE(review): parse_args() with no arguments reads sys.argv; this relies
  # on the caller passing sys.argv itself as "argv" (mutated by the pop(1)
  # above) — confirm before reusing with any other list.
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    # Accumulate running minimum/maximum; None means "not yet set" for the
    # accumulators and "unlimited" for arg.max
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = arg.max is not None
    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
def SplitNodeOption(value):
  """Splits the value of a --node option.

  Returns a two-element list [node, remainder] when the value contains a
  colon, otherwise the tuple (value, None).

  """
  if not (value and ':' in value):
    return (value, None)
  return value.split(':', 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # no variants (None or empty list): just the base name
    return [os_name]
  return ['%s+%s' % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  # a leading "+" means "append to the defaults" rather than "replace"
  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
# Convenience alias for the decorator provided by the rpc module.
UsesRPC = rpc.RunWithRPC
1432

    
1433

    
1434
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the
      operation')]; note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last choice doubles as the default answer (used without a tty)
  answer = choices[-1][1]
  text = "\n".join(textwrap.fill(line, 70, replace_whitespace=False)
                   for line in text.splitlines())
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    # no controlling terminal available, fall back to the default
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict((entry[0], entry[1]) for entry in choices)
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      # read at most two characters: one answer char plus the newline
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        # print the description of every choice, then ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  Carries exactly one argument: the ID of the job that was submitted.
  The handler is expected to print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  return cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for changes on the job, forwarding any new log
  messages to C{report_cbs}, until the job reaches a final status; then
  queries the final result and either returns it or raises.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      # Deliver new log messages first; track the highest serial seen so
      # they are not requested again on the next wait.  Note: when there
      # ARE log entries, the status check below is deliberately skipped
      # until the next iteration.
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # max(None, serial) works here (Python 2 ordering semantics)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  # the job has finished: fetch its final status and per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # find the first failed opcode and report it, distinguishing a partial
  # failure (some earlier opcodes succeeded) from a complete one
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase:
  """Abstract interface used by L{GenericPollJob} to talk to the job queue.

  Subclasses must provide both methods; the defaults only raise
  C{NotImplementedError}.

  """
  def __init__(self):
    """No state is kept at this level.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Blocks once until the given job changes.

    """
    raise NotImplementedError

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError


class JobPollReportCbBase:
  """Abstract reporting interface for L{GenericPollJob}.

  Subclasses decide where log messages and "no change" notices go; the
  defaults only raise C{NotImplementedError}.

  """
  def __init__(self):
    """No state is kept at this level.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError


class _LuxiJobPollCb(JobPollCbBase):
  """Job-poll callback implementation backed by a luxi client.

  """
  def __init__(self, cl):
    """Remembers the luxi client all calls are delegated to.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Delegates the one-shot wait to the luxi client.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields, prev_job_info,
                                        prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Delegates the job query to the luxi client.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Stores the feedback function (must be callable).

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Passes a log message on as a (timestamp, type, message) tuple.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while; deliberately silent.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that write to standard output/error.

  """
  def __init__(self):
    """Initializes the one-shot notification flags.

    """
    JobPollReportCbBase.__init__(self)

    # Each "stalled" notice is printed at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints a timestamped, formatted job log message to stdout.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Prints a one-time notice to stderr for a stalled job.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED:
      if not self.notified_queued:
        ToStderr("Job %s is waiting in queue", job_id)
        self.notified_queued = True
    elif status == constants.JOB_STATUS_WAITLOCK:
      if not self.notified_waitlock:
        ToStderr("Job %s is trying to acquire all necessary locks", job_id)
        self.notified_waitlock = True


1733
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-ELOG_MESSAGE payloads are stringified first; the result is always
  passed through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: int
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: if given, log messages are passed to this callable;
      mutually exclusive with C{reporter}
  @param reporter: custom L{JobPollReportCbBase} instance; mutually
      exclusive with C{feedback_fn}
  @raise errors.ProgrammerError: if both C{reporter} and C{feedback_fn}
      are specified

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None:
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback"
                                   " function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  # Apply generic options (debug level, dry-run, priority) before sending
  SetGenericOpcodeOpts([op], opts)

  results = PollJob(SendJob([op], cl=cl), cl=cl,
                    feedback_fn=feedback_fn, reporter=reporter)

  # Single opcode was submitted, so the job has exactly one result
  return results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and report its identifier (via
  L{JobSubmittedException}). It is used in order to simplify the
  implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  job = [op]
  SetGenericOpcodeOpts(job, opts)
  raise JobSubmittedException(SendJob(job, cl=cl))


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  # These are invariant over the loop, so compute them only once
  have_dry_run = hasattr(options, "dry_run")
  priority = getattr(options, "priority", None)

  for opcode in opcode_list:
    opcode.debug_level = options.debug
    if have_dry_run:
      opcode.dry_run = options.dry_run
    if priority is not None:
      opcode.priority = _PRIONAME_TO_VALUE[priority]


def GetClient():
  """Connects to the master daemon and returns a luxi client.

  If no master is reachable, the ssconf data is consulted to produce a
  more helpful error message before re-raising the original
  L{luxi.NoMasterError}.

  """
  # TODO: Cache object?
  try:
    return luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    # Cluster looks sane but the master daemon is down; re-raise
    raise


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  # Default exit code; overridden only for configuration corruption (2)
  # and for JobSubmittedException (0, which is not an error at all)
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # NOTE: the isinstance chain below is order-sensitive: specific error
  # classes must be tested before the errors.GenericError catch-all near
  # the end (e.g. QueryFilterParseError would otherwise never match,
  # assuming it derives from GenericError -- TODO confirm hierarchy)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is expected to be a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Distinguish "can't resolve myself" from "can't resolve a peer"
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two-argument form carries (details, error type)
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
               " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    # Catch-all for Ganeti errors not matched above
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: job was submitted with --submit, just report the ID
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')


def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  @return: the exit code of the command, suitable for sys.exit()

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      # "binary subcommand" identifies the invocation in the logs
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    # NOTE(review): FormatError's suggested exit code is discarded here;
    # a parameter/parse error always exits with 1
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  # Apply caller-supplied option overrides on top of the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    # Known error classes: log traceback, show formatted message, and use
    # the exit code recommended by FormatError
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result


def ParseNicOption(optvalue):
2013
  """Parses the value of the --net option(s).
2014

2015
  """
2016
  try:
2017
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2018
  except (TypeError, ValueError), err:
2019
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2020

    
2021
  nics = [{}] * nic_max
2022
  for nidx, ndict in optvalue:
2023
    nidx = int(nidx)
2024

    
2025
    if not isinstance(ndict, dict):
2026
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2027
                                 " got %s" % (nidx, ndict))
2028

    
2029
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2030

    
2031
    nics[nidx] = ndict
2032

    
2033
  return nics
2034

    
2035

    
2036
def GenericInstanceCreate(mode, opts, args):
2037
  """Add an instance to the cluster via either creation or import.
2038

2039
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2040
  @param opts: the command line options selected by the user
2041
  @type args: list
2042
  @param args: should contain only one element, the new instance name
2043
  @rtype: int
2044
  @return: the desired exit code
2045

2046
  """
2047
  instance = args[0]
2048

    
2049
  (pnode, snode) = SplitNodeOption(opts.node)
2050

    
2051
  hypervisor = None
2052
  hvparams = {}
2053
  if opts.hypervisor:
2054
    hypervisor, hvparams = opts.hypervisor
2055

    
2056
  if opts.nics:
2057
    nics = ParseNicOption(opts.nics)
2058
  elif opts.no_nics:
2059
    # no nics
2060
    nics = []
2061
  elif mode == constants.INSTANCE_CREATE:
2062
    # default of one nic, all auto
2063
    nics = [{}]
2064
  else:
2065
    # mode == import
2066
    nics = []
2067

    
2068
  if opts.disk_template == constants.DT_DISKLESS:
2069
    if opts.disks or opts.sd_size is not None:
2070
      raise errors.OpPrereqError("Diskless instance but disk"
2071
                                 " information passed")
2072
    disks = []
2073
  else:
2074
    if (not opts.disks and not opts.sd_size
2075
        and mode == constants.INSTANCE_CREATE):
2076
      raise errors.OpPrereqError("No disk information specified")
2077
    if opts.disks and opts.sd_size is not None:
2078
      raise errors.OpPrereqError("Please use either the '--disk' or"
2079
                                 " '-s' option")
2080
    if opts.sd_size is not None:
2081
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2082

    
2083
    if opts.disks:
2084
      try:
2085
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2086
      except ValueError, err:
2087
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2088
      disks = [{}] * disk_max
2089
    else:
2090
      disks = []
2091
    for didx, ddict in opts.disks:
2092
      didx = int(didx)
2093
      if not isinstance(ddict, dict):
2094
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2095
        raise errors.OpPrereqError(msg)
2096
      elif constants.IDISK_SIZE in ddict:
2097
        if constants.IDISK_ADOPT in ddict:
2098
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2099
                                     " (disk %d)" % didx)
2100
        try:
2101
          ddict[constants.IDISK_SIZE] = \
2102
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2103
        except ValueError, err:
2104
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2105
                                     (didx, err))
2106
      elif constants.IDISK_ADOPT in ddict:
2107
        if mode == constants.INSTANCE_IMPORT:
2108
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2109
                                     " import")
2110
        ddict[constants.IDISK_SIZE] = 0
2111
      else:
2112
        raise errors.OpPrereqError("Missing size or adoption source for"
2113
                                   " disk %d" % didx)
2114
      disks[didx] = ddict
2115

    
2116
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2117
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2118

    
2119
  if mode == constants.INSTANCE_CREATE:
2120
    start = opts.start
2121
    os_type = opts.os
2122
    force_variant = opts.force_variant
2123
    src_node = None
2124
    src_path = None
2125
    no_install = opts.no_install
2126
    identify_defaults = False
2127
  elif mode == constants.INSTANCE_IMPORT:
2128
    start = False
2129
    os_type = None
2130
    force_variant = False
2131
    src_node = opts.src_node
2132
    src_path = opts.src_dir
2133
    no_install = None
2134
    identify_defaults = opts.identify_defaults
2135
  else:
2136
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2137

    
2138
  op = opcodes.OpInstanceCreate(instance_name=instance,
2139
                                disks=disks,
2140
                                disk_template=opts.disk_template,
2141
                                nics=nics,
2142
                                pnode=pnode, snode=snode,
2143
                                ip_check=opts.ip_check,
2144
                                name_check=opts.name_check,
2145
                                wait_for_sync=opts.wait_for_sync,
2146
                                file_storage_dir=opts.file_storage_dir,
2147
                                file_driver=opts.file_driver,
2148
                                iallocator=opts.iallocator,
2149
                                hypervisor=hypervisor,
2150
                                hvparams=hvparams,
2151
                                beparams=opts.beparams,
2152
                                osparams=opts.osparams,
2153
                                mode=mode,
2154
                                start=start,
2155
                                os_type=os_type,
2156
                                force_variant=force_variant,
2157
                                src_node=src_node,
2158
                                src_path=src_path,
2159
                                no_install=no_install,
2160
                                identify_defaults=identify_defaults)
2161

    
2162
  SubmitOrSend(op, opts)
2163
  return 0
2164

    
2165

    
2166
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    # SSH runner used for commands on non-master nodes
    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; used for start/stop ordering
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; None (or the master node's name) means
        "run locally", anything else goes through SSH as root
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command exits with a failure

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called; note that it receives this helper
        instance as its first argument, followed by C{args}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  client = GetClient()

  (cluster_name, master_node) = \
    client.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=client)

  # Don't keep a reference to the client. The master daemon will go away.
  del client

  assert master_node in online_nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, online_nodes)
  return helper.Call(fn, *args)


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
      (NOTE: rows are modified in place -- values are stringified and
      unit-formatted directly inside C{data})
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  # Build one %-format specifier per column: plain "%s" in separator mode,
  # width-parameterized "%*s"/"%-*s" (right/left aligned) in "smart" mode
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens tracks the maximum content width seen per column
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    # Escape "%" in the separator so it survives the later % formatting
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: stringify (and unit-format) all values in place, while
  # collecting column widths for smart mode
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # Headers also contribute to the column width
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # Avoid padding (trailing whitespace) after the last column unless it
    # is right-aligned
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  # Second pass: render the rows; None rows become all-dash placeholders
  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


def _FormatBool(value):
2398
  """Formats a boolean value as a string.
2399

2400
  """
2401
  if value:
2402
    return "Y"
2403
  return "N"
2404

    
2405

    
2406
#: Default formatting for query results; (callback, align right)
# NOTE: constants.QFT_UNIT is deliberately absent -- it depends on the
# runtime unit choice and is handled separately in _GetColumnFormatter,
# which asserts its absence here
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  # Per-field overrides take precedence over everything else
  custom = override.get(fdef.name, None)
  if custom is not None:
    return custom

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  try:
    return _DEFAULT_FORMAT_QUERY[fdef.kind]
  except KeyError:
    raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Formats one (status, value) pair into its text representation.

    """
    (status, value) = data

    # Keep the caller informed about this field's status
    self._status_fn(status)

    if status != constants.RS_NORMAL:
      # Abnormal statuses carry no payload; render the status itself
      assert value is None, \
             "Found value %r for abnormal status %s" % (value, status)
      return FormatResultError(status, self._verbose)

    return self._fn(value)
2483

    
2484

    
2485
def FormatResultError(status, verbose):
  """Turns a result status other than L{constants.RS_NORMAL} into text.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    descriptions = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)

  (verbose_text, normal_text) = descriptions
  if verbose:
    return verbose_text
  return normal_text
2504

    
2505

    
2506
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    # Separated (machine-readable) output defaults to megabytes,
    # human-readable output to automatic unit selection
    unit = "m" if separator else "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, incremented by the column formatters as they run
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  def _BuildColumn(fdef):
    # Wrap the per-kind formatter so every formatted value also updates
    # the status statistics
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    wrapped = _QueryColumnFormatter(fn, _RecordStatus, verbose)
    return TableColumn(fdef.title, wrapped, align_right)

  columns = [_BuildColumn(fdef) for fdef in result.fields]

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  abnormal = compat.any(count > 0 for key, count in stats.items()
                        if key != constants.RS_NORMAL)

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif abnormal:
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
2568

    
2569

    
2570
def _GetUnknownFields(fdefs):
  """Collects the fields of unknown type contained in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @return: list of field definitions whose kind is L{constants.QFT_UNKNOWN}

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
2578

    
2579

    
2580
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown field was encountered

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
2593

    
2594

    
2595
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if cl is None:
    cl = GetClient()

  if not names:
    names = None

  if (force_filter or
      (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
    try:
      (filter_text, ) = names
    except (ValueError, TypeError):
      # ValueError: more than one name was given; TypeError: "names" is
      # None (e.g. force_filter set but no argument passed).  Both cases
      # must produce the user-visible error instead of a traceback.
      raise errors.OpPrereqError("Exactly one argument must be given as a"
                                 " filter")

    logging.debug("Parsing '%s' as filter", filter_text)
    filter_ = qlang.ParseFilter(filter_text)
  else:
    filter_ = qlang.MakeSimpleFilter("name", names)

  response = cl.Query(resource, fields, filter_)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
2660

    
2661

    
2662
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  # All three columns are plain left-aligned text
  columns = [TableColumn(title, str, False)
             for title in ("Name", "Title", "Description")]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2699

    
2700

    
2701
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    # Plain value object: formatting function plus display attributes
    self.align_right = align_right
    self.format = fn
    self.title = title
2719

    
2720

    
2721
def _GetColFormatString(width, align_right):
2722
  """Returns the format string for a field.
2723

2724
  """
2725
  if align_right:
2726
    sign = ""
2727
  else:
2728
    sign = "-"
2729

    
2730
  return "%%%s%ss" % (sign, width)
2731

    
2732

    
2733
def FormatTable(rows, columns, header, separator):
  """Renders rows either as an aligned table or as separator-joined lines.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0] * len(columns)

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    rendered = [col.format(value) for (value, col) in zip(row, columns)]

    if separator is None:
      # Track the widest value seen so far in each column
      colwidth = [max(width, len(value))
                  for (width, value) in zip(colwidth, rendered)]

    data.append(rendered)

  if separator is not None:
    # No alignment is needed when a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join(_GetColFormatString(width, col.align_right)
                 for (col, width) in zip(columns, colwidth))

  return [fmt % tuple(row) for row in data]
2780

    
2781

    
2782
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  # Anything that is not a (seconds, microseconds) pair is unformattable
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'

  (sec, usec) = ts
  return "%s.%06d" % (time.strftime("%F %T", time.localtime(sec)), usec)
2796

    
2797

    
2798
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixed will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")

  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }

  multiplier = suffix_map.get(value[-1], None)
  if multiplier is None:
    # No recognized suffix, interpret the whole string as seconds
    try:
      return int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)

  value = value[:-1]
  if not value: # no data left after stripping the suffix
    raise errors.OpPrereqError("Invalid time specification (only"
                               " suffix passed)")

  try:
    return int(value) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2838

    
2839

    
2840
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  # Column 2 of the query below is the secondary IP, column 0 the name
  name_idx = 2 if secondary_ips else 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]

    def _Accept(name):
      return name != master_node
  else:
    def _Accept(_):
      return True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)

  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))

  return [row[name_idx] for row in result if not row[1] and _Accept(row[0])]
2884

    
2885

    
2886
def _ToStream(stream, txt, *args):
2887
  """Write a message to a stream, bypassing the logging system
2888

2889
  @type stream: file object
2890
  @param stream: the file to which we should write
2891
  @type txt: str
2892
  @param txt: the message
2893

2894
  """
2895
  try:
2896
    if args:
2897
      args = tuple(args)
2898
      stream.write(txt % args)
2899
    else:
2900
      stream.write(txt)
2901
    stream.write('\n')
2902
    stream.flush()
2903
  except IOError, err:
2904
    if err.errno == errno.EPIPE:
2905
      # our terminal went away, we'll exit
2906
      sys.exit(constants.EXIT_FAILURE)
2907
    else:
2908
      raise
2909

    
2910

    
2911
def ToStdout(txt, *args):
  """Prints a message on standard output, bypassing the logging system.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message (optionally a %-format string for C{args})

  """
  _ToStream(sys.stdout, txt, *args)
2921

    
2922

    
2923
def ToStderr(txt, *args):
  """Prints a message on standard error, bypassing the logging system.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message (optionally a %-format string for C{args})

  """
  _ToStream(sys.stderr, txt, *args)
2933

    
2934

    
2935
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    # Jobs queued but not yet submitted: (sequence number, name, opcodes)
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # Submitted jobs: (sequence number, submit status, job ID, name)
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Monotonic counter; the sequence number lets GetResults() restore
    # submission order after jobs finish out of order
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    Returns C{fmt % name} if C{name} is set, otherwise an empty string.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds an already-submitted job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit jobs one by one via SubmitJob instead of
        a single SubmitManyJobs call

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found; fall back to the first one so the caller can block on it
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      # Note: only (status, job ID/error) pairs are returned here, without
      # the sequence number or name
      return [row[1:3] for row in self.jobs]
3084

    
3085

    
3086
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  prefix = level * "  "
  # Walk over the effective parameter set so that defaulted values are
  # shown too, marked as such
  for name in sorted(actual.keys()):
    if name in param_dict:
      shown = param_dict[name]
    else:
      shown = "default (%s)" % actual[name]
    buf.write("%s- %s: %s\n" % (prefix, name, shown))
3102

    
3103

    
3104
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  item_count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, item_count, list_type, extra))
  listing = "\n".join("  %s" % name for name in names)
  affected = ("\nAffected %s:\n" % list_type) + listing

  choices = [
    ("y", True, "Yes, execute the %s" % text),
    ("n", False, "No, abort the %s" % text),
    ]

  if item_count > 20:
    # Long lists are only shown on demand via an extra "v" choice
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    # User asked to see the list; show it and re-ask without the "v" option
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice