root / lib / cli.py @ 4b97f902

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
from cStringIO import StringIO
31

    
32
from ganeti import utils
33
from ganeti import errors
34
from ganeti import constants
35
from ganeti import opcodes
36
from ganeti import luxi
37
from ganeti import ssconf
38
from ganeti import rpc
39
from ganeti import ssh
40
from ganeti import compat
41
from ganeti import netutils
42
from ganeti import qlang
43

    
44
from optparse import (OptionParser, TitledHelpFormatter,
45
                      Option, OptionValueError)
46

    
47

    
48
__all__ = [
49
  # Command line options
50
  "ADD_UIDS_OPT",
51
  "ALLOCATABLE_OPT",
52
  "ALLOC_POLICY_OPT",
53
  "ALL_OPT",
54
  "AUTO_PROMOTE_OPT",
55
  "AUTO_REPLACE_OPT",
56
  "BACKEND_OPT",
57
  "BLK_OS_OPT",
58
  "CAPAB_MASTER_OPT",
59
  "CAPAB_VM_OPT",
60
  "CLEANUP_OPT",
61
  "CLUSTER_DOMAIN_SECRET_OPT",
62
  "CONFIRM_OPT",
63
  "CP_SIZE_OPT",
64
  "DEBUG_OPT",
65
  "DEBUG_SIMERR_OPT",
66
  "DISKIDX_OPT",
67
  "DISK_OPT",
68
  "DISK_TEMPLATE_OPT",
69
  "DRAINED_OPT",
70
  "DRY_RUN_OPT",
71
  "DRBD_HELPER_OPT",
72
  "EARLY_RELEASE_OPT",
73
  "ENABLED_HV_OPT",
74
  "ERROR_CODES_OPT",
75
  "FIELDS_OPT",
76
  "FILESTORE_DIR_OPT",
77
  "FILESTORE_DRIVER_OPT",
78
  "FORCE_OPT",
79
  "FORCE_VARIANT_OPT",
80
  "GLOBAL_FILEDIR_OPT",
81
  "HID_OS_OPT",
82
  "GLOBAL_SHARED_FILEDIR_OPT",
83
  "HVLIST_OPT",
84
  "HVOPTS_OPT",
85
  "HYPERVISOR_OPT",
86
  "IALLOCATOR_OPT",
87
  "DEFAULT_IALLOCATOR_OPT",
88
  "IDENTIFY_DEFAULTS_OPT",
89
  "IGNORE_CONSIST_OPT",
90
  "IGNORE_FAILURES_OPT",
91
  "IGNORE_OFFLINE_OPT",
92
  "IGNORE_REMOVE_FAILURES_OPT",
93
  "IGNORE_SECONDARIES_OPT",
94
  "IGNORE_SIZE_OPT",
95
  "INTERVAL_OPT",
96
  "MAC_PREFIX_OPT",
97
  "MAINTAIN_NODE_HEALTH_OPT",
98
  "MASTER_NETDEV_OPT",
99
  "MC_OPT",
100
  "MIGRATION_MODE_OPT",
101
  "NET_OPT",
102
  "NEW_CLUSTER_CERT_OPT",
103
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
104
  "NEW_CONFD_HMAC_KEY_OPT",
105
  "NEW_RAPI_CERT_OPT",
106
  "NEW_SECONDARY_OPT",
107
  "NIC_PARAMS_OPT",
108
  "NODE_FORCE_JOIN_OPT",
109
  "NODE_LIST_OPT",
110
  "NODE_PLACEMENT_OPT",
111
  "NODEGROUP_OPT",
112
  "NODE_PARAMS_OPT",
113
  "NODE_POWERED_OPT",
114
  "NODRBD_STORAGE_OPT",
115
  "NOHDR_OPT",
116
  "NOIPCHECK_OPT",
117
  "NO_INSTALL_OPT",
118
  "NONAMECHECK_OPT",
119
  "NOLVM_STORAGE_OPT",
120
  "NOMODIFY_ETCHOSTS_OPT",
121
  "NOMODIFY_SSH_SETUP_OPT",
122
  "NONICS_OPT",
123
  "NONLIVE_OPT",
124
  "NONPLUS1_OPT",
125
  "NOSHUTDOWN_OPT",
126
  "NOSTART_OPT",
127
  "NOSSH_KEYCHECK_OPT",
128
  "NOVOTING_OPT",
129
  "NWSYNC_OPT",
130
  "ON_PRIMARY_OPT",
131
  "ON_SECONDARY_OPT",
132
  "OFFLINE_OPT",
133
  "OSPARAMS_OPT",
134
  "OS_OPT",
135
  "OS_SIZE_OPT",
136
  "OOB_TIMEOUT_OPT",
137
  "PREALLOC_WIPE_DISKS_OPT",
138
  "PRIMARY_IP_VERSION_OPT",
139
  "PRIORITY_OPT",
140
  "RAPI_CERT_OPT",
141
  "READD_OPT",
142
  "REBOOT_TYPE_OPT",
143
  "REMOVE_INSTANCE_OPT",
144
  "REMOVE_UIDS_OPT",
145
  "RESERVED_LVS_OPT",
146
  "ROMAN_OPT",
147
  "SECONDARY_IP_OPT",
148
  "SELECT_OS_OPT",
149
  "SEP_OPT",
150
  "SHOWCMD_OPT",
151
  "SHUTDOWN_TIMEOUT_OPT",
152
  "SINGLE_NODE_OPT",
153
  "SRC_DIR_OPT",
154
  "SRC_NODE_OPT",
155
  "SUBMIT_OPT",
156
  "STATIC_OPT",
157
  "SYNC_OPT",
158
  "TAG_SRC_OPT",
159
  "TIMEOUT_OPT",
160
  "UIDPOOL_OPT",
161
  "USEUNITS_OPT",
162
  "USE_REPL_NET_OPT",
163
  "VERBOSE_OPT",
164
  "VG_NAME_OPT",
165
  "YES_DOIT_OPT",
166
  # Generic functions for CLI programs
167
  "ConfirmOperation",
168
  "GenericMain",
169
  "GenericInstanceCreate",
170
  "GenericList",
171
  "GenericListFields",
172
  "GetClient",
173
  "GetOnlineNodes",
174
  "JobExecutor",
175
  "JobSubmittedException",
176
  "ParseTimespec",
177
  "RunWhileClusterStopped",
178
  "SubmitOpCode",
179
  "SubmitOrSend",
180
  "UsesRPC",
181
  # Formatting functions
182
  "ToStderr", "ToStdout",
183
  "FormatError",
184
  "FormatQueryResult",
185
  "FormatParameterDict",
186
  "GenerateTable",
187
  "AskUser",
188
  "FormatTimestamp",
189
  "FormatLogMessage",
190
  # Tags functions
191
  "ListTags",
192
  "AddTags",
193
  "RemoveTags",
194
  # command line options support infrastructure
195
  "ARGS_MANY_INSTANCES",
196
  "ARGS_MANY_NODES",
197
  "ARGS_MANY_GROUPS",
198
  "ARGS_NONE",
199
  "ARGS_ONE_INSTANCE",
200
  "ARGS_ONE_NODE",
201
  "ARGS_ONE_GROUP",
202
  "ARGS_ONE_OS",
203
  "ArgChoice",
204
  "ArgCommand",
205
  "ArgFile",
206
  "ArgGroup",
207
  "ArgHost",
208
  "ArgInstance",
209
  "ArgJobId",
210
  "ArgNode",
211
  "ArgOs",
212
  "ArgSuggest",
213
  "ArgUnknown",
214
  "OPT_COMPL_INST_ADD_NODES",
215
  "OPT_COMPL_MANY_NODES",
216
  "OPT_COMPL_ONE_IALLOCATOR",
217
  "OPT_COMPL_ONE_INSTANCE",
218
  "OPT_COMPL_ONE_NODE",
219
  "OPT_COMPL_ONE_NODEGROUP",
220
  "OPT_COMPL_ONE_OS",
221
  "cli_option",
222
  "SplitNodeOption",
223
  "CalculateOSNames",
224
  "ParseFields",
225
  "COMMON_CREATE_OPTS",
226
  ]
227

    
228
NO_PREFIX = "no_"
229
UN_PREFIX = "-"
230

    
231
#: Priorities (sorted)
232
_PRIORITY_NAMES = [
233
  ("low", constants.OP_PRIO_LOW),
234
  ("normal", constants.OP_PRIO_NORMAL),
235
  ("high", constants.OP_PRIO_HIGH),
236
  ]
237

    
238
#: Priority dictionary for easier lookup
239
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
240
# we migrate to Python 2.6
241
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
242

    
243
# Query result status for clients
244
(QR_NORMAL,
245
 QR_UNKNOWN,
246
 QR_INCOMPLETE) = range(3)
247

    
248

    
249
class _Argument:
250
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
251
    self.min = min
252
    self.max = max
253

    
254
  def __repr__(self):
255
    return ("<%s min=%s max=%s>" %
256
            (self.__class__.__name__, self.min, self.max))
257

    
258

    
259
class ArgSuggest(_Argument):
260
  """Suggesting argument.
261

262
  Suggested values are passed to the constructor; any value is accepted.
263

264
  """
265
  # pylint: disable-msg=W0622
266
  def __init__(self, min=0, max=None, choices=None):
267
    _Argument.__init__(self, min=min, max=max)
268
    self.choices = choices
269

    
270
  def __repr__(self):
271
    return ("<%s min=%s max=%s choices=%r>" %
272
            (self.__class__.__name__, self.min, self.max, self.choices))
273

    
274

    
275
class ArgChoice(ArgSuggest):
276
  """Choice argument.
277

278
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
279
  but value must be one of the choices.
280

281
  """
282

    
283

    
284
class ArgUnknown(_Argument):
285
  """Unknown argument to program (e.g. determined at runtime).
286

287
  """
288

    
289

    
290
class ArgInstance(_Argument):
291
  """Instances argument.
292

293
  """
294

    
295

    
296
class ArgNode(_Argument):
297
  """Node argument.
298

299
  """
300

    
301

    
302
class ArgGroup(_Argument):
303
  """Node group argument.
304

305
  """
306

    
307

    
308
class ArgJobId(_Argument):
309
  """Job ID argument.
310

311
  """
312

    
313

    
314
class ArgFile(_Argument):
315
  """File path argument.
316

317
  """
318

    
319

    
320
class ArgCommand(_Argument):
321
  """Command argument.
322

323
  """
324

    
325

    
326
class ArgHost(_Argument):
327
  """Host argument.
328

329
  """
330

    
331

    
332
class ArgOs(_Argument):
333
  """OS argument.
334

335
  """
336

    
337

    
338
ARGS_NONE = []
339
ARGS_MANY_INSTANCES = [ArgInstance()]
340
ARGS_MANY_NODES = [ArgNode()]
341
ARGS_MANY_GROUPS = [ArgGroup()]
342
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
343
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
344
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
345
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
346

    
347

    
348
def _ExtractTagsObject(opts, args):
349
  """Extract the tag type object.
350

351
  Note that this function will modify its args parameter.
352

353
  """
354
  if not hasattr(opts, "tag_type"):
355
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
356
  kind = opts.tag_type
357
  if kind == constants.TAG_CLUSTER:
358
    retval = kind, kind
359
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
360
    if not args:
361
      raise errors.OpPrereqError("no arguments passed to the command")
362
    name = args.pop(0)
363
    retval = kind, name
364
  else:
365
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
366
  return retval
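
# Illustrative sketch (assuming an "opts" object whose tag_type attribute is
# constants.TAG_NODE): node/instance tags consume the first positional
# argument as the object name, while cluster tags use the tag kind itself.
#
#   >>> args = ["node1.example.com", "mytag"]
#   >>> _ExtractTagsObject(opts, args)
#   (constants.TAG_NODE, 'node1.example.com')
#   >>> args
#   ['mytag']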
367

    
368

    
369
def _ExtendTags(opts, args):
370
  """Extend the args if a source file has been given.
371

372
  This function will extend the tags with the contents of the file
373
  passed in the 'tags_source' attribute of the opts parameter. A file
374
  named '-' will be replaced by stdin.
375

376
  """
377
  fname = opts.tags_source
378
  if fname is None:
379
    return
380
  if fname == "-":
381
    new_fh = sys.stdin
382
  else:
383
    new_fh = open(fname, "r")
384
  new_data = []
385
  try:
386
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
387
    # because of python bug 1633941
388
    while True:
389
      line = new_fh.readline()
390
      if not line:
391
        break
392
      new_data.append(line.strip())
393
  finally:
394
    new_fh.close()
395
  args.extend(new_data)
396

    
397

    
398
def ListTags(opts, args):
399
  """List the tags on a given object.
400

401
  This is a generic implementation that knows how to deal with all
402
  three cases of tag objects (cluster, node, instance). The opts
403
  argument is expected to contain a tag_type field denoting what
404
  object type we work on.
405

406
  """
407
  kind, name = _ExtractTagsObject(opts, args)
408
  cl = GetClient()
409
  result = cl.QueryTags(kind, name)
410
  result = list(result)
411
  result.sort()
412
  for tag in result:
413
    ToStdout(tag)
414

    
415

    
416
def AddTags(opts, args):
417
  """Add tags on a given object.
418

419
  This is a generic implementation that knows how to deal with all
420
  three cases of tag objects (cluster, node, instance). The opts
421
  argument is expected to contain a tag_type field denoting what
422
  object type we work on.
423

424
  """
425
  kind, name = _ExtractTagsObject(opts, args)
426
  _ExtendTags(opts, args)
427
  if not args:
428
    raise errors.OpPrereqError("No tags to be added")
429
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
430
  SubmitOpCode(op, opts=opts)
431

    
432

    
433
def RemoveTags(opts, args):
434
  """Remove tags from a given object.
435

436
  This is a generic implementation that knows how to deal with all
437
  three cases of tag objects (cluster, node, instance). The opts
438
  argument is expected to contain a tag_type field denoting what
439
  object type we work on.
440

441
  """
442
  kind, name = _ExtractTagsObject(opts, args)
443
  _ExtendTags(opts, args)
444
  if not args:
445
    raise errors.OpPrereqError("No tags to be removed")
446
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
447
  SubmitOpCode(op, opts=opts)
448

    
449

    
450
def check_unit(option, opt, value): # pylint: disable-msg=W0613
451
  """OptParsers custom converter for units.
452

453
  """
454
  try:
455
    return utils.ParseUnit(value)
456
  except errors.UnitParseError, err:
457
    raise OptionValueError("option %s: %s" % (opt, err))
458

    
459

    
460
def _SplitKeyVal(opt, data):
461
  """Convert a KeyVal string into a dict.
462

463
  This function will convert a key=val[,...] string into a dict. Empty
464
  values will be converted specially: keys which have the prefix 'no_'
465
  will have the value=False and the prefix stripped, keys which have the
466
  prefix '-' will have the value=None, and the others will have value=True.
467

468
  @type opt: string
469
  @param opt: a string holding the option name for which we process the
470
      data, used in building error messages
471
  @type data: string
472
  @param data: a string of the format key=val,key=val,...
473
  @rtype: dict
474
  @return: {key: val, key: val}
475
  @raises errors.ParameterError: if there are duplicate keys
476

477
  """
478
  kv_dict = {}
479
  if data:
480
    for elem in utils.UnescapeAndSplit(data, sep=","):
481
      if "=" in elem:
482
        key, val = elem.split("=", 1)
483
      else:
484
        if elem.startswith(NO_PREFIX):
485
          key, val = elem[len(NO_PREFIX):], False
486
        elif elem.startswith(UN_PREFIX):
487
          key, val = elem[len(UN_PREFIX):], None
488
        else:
489
          key, val = elem, True
490
      if key in kv_dict:
491
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
492
                                    (key, opt))
493
      kv_dict[key] = val
494
  return kv_dict
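
# Illustrative sketch of the prefix handling above (the option name "-H" is
# arbitrary; dict ordering is unspecified under Python 2):
#
#   >>> _SplitKeyVal("-H", "boot_order=cd,no_acpi,-kernel_path")
#   {'boot_order': 'cd', 'acpi': False, 'kernel_path': None}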
495

    
496

    
497
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
498
  """Custom parser for ident:key=val,key=val options.
499

500
  This will store the parsed values as a tuple (ident, {key: val}). As such,
501
  multiple uses of this option via action=append is possible.
502

503
  """
504
  if ":" not in value:
505
    ident, rest = value, ''
506
  else:
507
    ident, rest = value.split(":", 1)
508

    
509
  if ident.startswith(NO_PREFIX):
510
    if rest:
511
      msg = "Cannot pass options when removing parameter groups: %s" % value
512
      raise errors.ParameterError(msg)
513
    retval = (ident[len(NO_PREFIX):], False)
514
  elif ident.startswith(UN_PREFIX):
515
    if rest:
516
      msg = "Cannot pass options when removing parameter groups: %s" % value
517
      raise errors.ParameterError(msg)
518
    retval = (ident[len(UN_PREFIX):], None)
519
  else:
520
    kv_dict = _SplitKeyVal(opt, rest)
521
    retval = (ident, kv_dict)
522
  return retval
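
# Illustrative sketch, using values in the style accepted by the "-H" and
# "--net" options defined below (dict ordering may vary):
#
#   >>> check_ident_key_val(None, "-H", "xen-pvm:kernel_path=/vmlinuz,no_acpi")
#   ('xen-pvm', {'kernel_path': '/vmlinuz', 'acpi': False})
#   >>> check_ident_key_val(None, "--net", "no_0")
#   ('0', False)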
523

    
524

    
525
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
526
  """Custom parser class for key=val,key=val options.
527

528
  This will store the parsed values as a dict {key: val}.
529

530
  """
531
  return _SplitKeyVal(opt, value)
532

    
533

    
534
def check_bool(option, opt, value): # pylint: disable-msg=W0613
535
  """Custom parser for yes/no options.
536

537
  This will store the parsed value as either True or False.
538

539
  """
540
  value = value.lower()
541
  if value == constants.VALUE_FALSE or value == "no":
542
    return False
543
  elif value == constants.VALUE_TRUE or value == "yes":
544
    return True
545
  else:
546
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
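
# Illustrative sketch; these are the forms accepted by "bool"-typed options
# such as --master-candidate below:
#
#   >>> check_bool(None, "--master-candidate", "Yes")
#   True
#   >>> check_bool(None, "--master-candidate", "no")
#   False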
547

    
548

    
549
# completion_suggest is normally a list. For dynamic completion we use numeric
550
# values that do not evaluate to False.
551
(OPT_COMPL_MANY_NODES,
552
 OPT_COMPL_ONE_NODE,
553
 OPT_COMPL_ONE_INSTANCE,
554
 OPT_COMPL_ONE_OS,
555
 OPT_COMPL_ONE_IALLOCATOR,
556
 OPT_COMPL_INST_ADD_NODES,
557
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
558

    
559
OPT_COMPL_ALL = frozenset([
560
  OPT_COMPL_MANY_NODES,
561
  OPT_COMPL_ONE_NODE,
562
  OPT_COMPL_ONE_INSTANCE,
563
  OPT_COMPL_ONE_OS,
564
  OPT_COMPL_ONE_IALLOCATOR,
565
  OPT_COMPL_INST_ADD_NODES,
566
  OPT_COMPL_ONE_NODEGROUP,
567
  ])
568

    
569

    
570
class CliOption(Option):
571
  """Custom option class for optparse.
572

573
  """
574
  ATTRS = Option.ATTRS + [
575
    "completion_suggest",
576
    ]
577
  TYPES = Option.TYPES + (
578
    "identkeyval",
579
    "keyval",
580
    "unit",
581
    "bool",
582
    )
583
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
584
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
585
  TYPE_CHECKER["keyval"] = check_key_val
586
  TYPE_CHECKER["unit"] = check_unit
587
  TYPE_CHECKER["bool"] = check_bool
588

    
589

    
590
# optparse.py sets make_option, so we do it for our own option class, too
591
cli_option = CliOption
592

    
593

    
594
_YORNO = "yes|no"
595

    
596
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
597
                       help="Increase debugging level")
598

    
599
NOHDR_OPT = cli_option("--no-headers", default=False,
600
                       action="store_true", dest="no_headers",
601
                       help="Don't display column headers")
602

    
603
SEP_OPT = cli_option("--separator", default=None,
604
                     action="store", dest="separator",
605
                     help=("Separator between output fields"
606
                           " (defaults to one space)"))
607

    
608
USEUNITS_OPT = cli_option("--units", default=None,
609
                          dest="units", choices=('h', 'm', 'g', 't'),
610
                          help="Specify units for output (one of h/m/g/t)")
611

    
612
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
613
                        type="string", metavar="FIELDS",
614
                        help="Comma separated list of output fields")
615

    
616
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
617
                       default=False, help="Force the operation")
618

    
619
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
620
                         default=False, help="Do not require confirmation")
621

    
622
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
623
                                  action="store_true", default=False,
624
                                  help=("Ignore offline nodes and do as much"
625
                                        " as possible"))
626

    
627
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
628
                         default=None, help="File with tag names")
629

    
630
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
631
                        default=False, action="store_true",
632
                        help=("Submit the job and return the job ID, but"
633
                              " don't wait for the job to finish"))
634

    
635
SYNC_OPT = cli_option("--sync", dest="do_locking",
636
                      default=False, action="store_true",
637
                      help=("Grab locks while doing the queries"
638
                            " in order to ensure more consistent results"))
639

    
640
DRY_RUN_OPT = cli_option("--dry-run", default=False,
641
                         action="store_true",
642
                         help=("Do not execute the operation, just run the"
643
                               " check steps and verify it it could be"
644
                               " executed"))
645

    
646
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
647
                         action="store_true",
648
                         help="Increase the verbosity of the operation")
649

    
650
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
651
                              action="store_true", dest="simulate_errors",
652
                              help="Debugging option that makes the operation"
653
                              " treat most runtime checks as failed")
654

    
655
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
656
                        default=True, action="store_false",
657
                        help="Don't wait for sync (DANGEROUS!)")
658

    
659
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
660
                               help="Custom disk setup (diskless, file,"
661
                               " plain or drbd)",
662
                               default=None, metavar="TEMPL",
663
                               choices=list(constants.DISK_TEMPLATES))
664

    
665
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
666
                        help="Do not create any network cards for"
667
                        " the instance")
668

    
669
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
670
                               help="Relative path under default cluster-wide"
671
                               " file storage dir to store file-based disks",
672
                               default=None, metavar="<DIR>")
673

    
674
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
675
                                  help="Driver to use for image files",
676
                                  default="loop", metavar="<DRIVER>",
677
                                  choices=list(constants.FILE_DRIVER))
678

    
679
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
680
                            help="Select nodes for the instance automatically"
681
                            " using the <NAME> iallocator plugin",
682
                            default=None, type="string",
683
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
684

    
685
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
686
                            metavar="<NAME>",
687
                            help="Set the default instance allocator plugin",
688
                            default=None, type="string",
689
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
690

    
691
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
692
                    metavar="<os>",
693
                    completion_suggest=OPT_COMPL_ONE_OS)
694

    
695
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
696
                         type="keyval", default={},
697
                         help="OS parameters")
698

    
699
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
700
                               action="store_true", default=False,
701
                               help="Force an unknown variant")
702

    
703
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
704
                            action="store_true", default=False,
705
                            help="Do not install the OS (will"
706
                            " enable no-start)")
707

    
708
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
709
                         type="keyval", default={},
710
                         help="Backend parameters")
711

    
712
HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
713
                         default={}, dest="hvparams",
714
                         help="Hypervisor parameters")
715

    
716
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
717
                            help="Hypervisor and hypervisor options, in the"
718
                            " format hypervisor:option=value,option=value,...",
719
                            default=None, type="identkeyval")
720

    
721
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
722
                        help="Hypervisor and hypervisor options, in the"
723
                        " format hypervisor:option=value,option=value,...",
724
                        default=[], action="append", type="identkeyval")
725

    
726
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
727
                           action="store_false",
728
                           help="Don't check that the instance's IP"
729
                           " is alive")
730

    
731
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
732
                             default=True, action="store_false",
733
                             help="Don't check that the instance's name"
734
                             " is resolvable")
735

    
736
NET_OPT = cli_option("--net",
737
                     help="NIC parameters", default=[],
738
                     dest="nics", action="append", type="identkeyval")
739

    
740
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
741
                      dest="disks", action="append", type="identkeyval")
742

    
743
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
744
                         help="Comma-separated list of disks"
745
                         " indices to act on (e.g. 0,2) (optional,"
746
                         " defaults to all disks)")
747

    
748
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
749
                         help="Enforces a single-disk configuration using the"
750
                         " given disk size, in MiB unless a suffix is used",
751
                         default=None, type="unit", metavar="<size>")
752

    
753
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
754
                                dest="ignore_consistency",
755
                                action="store_true", default=False,
756
                                help="Ignore the consistency of the disks on"
757
                                " the secondary")
758

    
759
NONLIVE_OPT = cli_option("--non-live", dest="live",
760
                         default=True, action="store_false",
761
                         help="Do a non-live migration (this usually means"
762
                         " freeze the instance, save the state, transfer and"
763
                         " only then resume running on the secondary node)")
764

    
765
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
766
                                default=None,
767
                                choices=list(constants.HT_MIGRATION_MODES),
768
                                help="Override default migration mode (choose"
769
                                " either live or non-live")
770

    
771
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
772
                                help="Target node and optional secondary node",
773
                                metavar="<pnode>[:<snode>]",
774
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
775

    
776
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
777
                           action="append", metavar="<node>",
778
                           help="Use only this node (can be used multiple"
779
                           " times, if not given defaults to all nodes)",
780
                           completion_suggest=OPT_COMPL_ONE_NODE)
781

    
782
NODEGROUP_OPT = cli_option("-g", "--node-group",
783
                           dest="nodegroup",
784
                           help="Node group (name or uuid)",
785
                           metavar="<nodegroup>",
786
                           default=None, type="string",
787
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
788

    
789
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
790
                             metavar="<node>",
791
                             completion_suggest=OPT_COMPL_ONE_NODE)
792

    
793
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
794
                         action="store_false",
795
                         help="Don't start the instance after creation")
796

    
797
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
798
                         action="store_true", default=False,
799
                         help="Show command instead of executing it")
800

    
801
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
802
                         default=False, action="store_true",
803
                         help="Instead of performing the migration, try to"
804
                         " recover from a failed cleanup. This is safe"
805
                         " to run even if the instance is healthy, but it"
806
                         " will create extra replication traffic and "
807
                         " disrupt briefly the replication (like during the"
808
                         " migration")
809

    
810
STATIC_OPT = cli_option("-s", "--static", dest="static",
811
                        action="store_true", default=False,
812
                        help="Only show configuration data, not runtime data")
813

    
814
ALL_OPT = cli_option("--all", dest="show_all",
815
                     default=False, action="store_true",
816
                     help="Show info on all instances on the cluster."
817
                     " This can take a long time to run, use wisely")
818

    
819
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
820
                           action="store_true", default=False,
821
                           help="Interactive OS reinstall, lists available"
822
                           " OS templates for selection")
823

    
824
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
825
                                 action="store_true", default=False,
826
                                 help="Remove the instance from the cluster"
827
                                 " configuration even if there are failures"
828
                                 " during the removal process")
829

    
830
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
831
                                        dest="ignore_remove_failures",
832
                                        action="store_true", default=False,
833
                                        help="Remove the instance from the"
834
                                        " cluster configuration even if there"
835
                                        " are failures during the removal"
836
                                        " process")
837

    
838
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
839
                                 action="store_true", default=False,
840
                                 help="Remove the instance from the cluster")
841

    
842
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
843
                               help="Specifies the new secondary node",
844
                               metavar="NODE", default=None,
845
                               completion_suggest=OPT_COMPL_ONE_NODE)
846

    
847
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
848
                            default=False, action="store_true",
849
                            help="Replace the disk(s) on the primary"
850
                            " node (only for the drbd template)")
851

    
852
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
853
                              default=False, action="store_true",
854
                              help="Replace the disk(s) on the secondary"
855
                              " node (only for the drbd template)")
856

    
857
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
858
                              default=False, action="store_true",
859
                              help="Lock all nodes and auto-promote as needed"
860
                              " to MC status")
861

    
862
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
863
                              default=False, action="store_true",
864
                              help="Automatically replace faulty disks"
865
                              " (only for the drbd template)")
866

    
867
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
868
                             default=False, action="store_true",
869
                             help="Ignore current recorded size"
870
                             " (useful for forcing activation when"
871
                             " the recorded size is wrong)")
872

    
873
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
874
                          metavar="<node>",
875
                          completion_suggest=OPT_COMPL_ONE_NODE)
876

    
877
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
878
                         metavar="<dir>")
879

    
880
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
881
                              help="Specify the secondary ip for the node",
882
                              metavar="ADDRESS", default=None)
883

    
884
READD_OPT = cli_option("--readd", dest="readd",
885
                       default=False, action="store_true",
886
                       help="Readd old node after replacing it")
887

    
888
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
889
                                default=True, action="store_false",
890
                                help="Disable SSH key fingerprint checking")
891

    
892
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
893
                                 default=False, action="store_true",
894
                                 help="Force the joining of a node,"
895
                                      " needed when merging clusters")
896

    
897
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
898
                    type="bool", default=None, metavar=_YORNO,
899
                    help="Set the master_candidate flag on the node")
900

    
901
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
902
                         type="bool", default=None,
903
                         help=("Set the offline flag on the node"
904
                               " (cluster does not communicate with offline"
905
                               " nodes)"))
906

    
907
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
908
                         type="bool", default=None,
909
                         help=("Set the drained flag on the node"
910
                               " (excluded from allocation operations)"))
911

    
912
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
913
                    type="bool", default=None, metavar=_YORNO,
914
                    help="Set the master_capable flag on the node")
915

    
916
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
917
                    type="bool", default=None, metavar=_YORNO,
918
                    help="Set the vm_capable flag on the node")
919

    
920
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
921
                             type="bool", default=None, metavar=_YORNO,
922
                             help="Set the allocatable flag on a volume")
923

    
924
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
925
                               help="Disable support for lvm based instances"
926
                               " (cluster-wide)",
927
                               action="store_false", default=True)
928

    
929
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
930
                            dest="enabled_hypervisors",
931
                            help="Comma-separated list of hypervisors",
932
                            type="string", default=None)
933

    
934
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
935
                            type="keyval", default={},
936
                            help="NIC parameters")
937

    
938
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
939
                         dest="candidate_pool_size", type="int",
940
                         help="Set the candidate pool size")
941

    
942
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
943
                         help=("Enables LVM and specifies the volume group"
944
                               " name (cluster-wide) for disk allocation"
945
                               " [%s]" % constants.DEFAULT_VG),
946
                         metavar="VG", default=None)
947

    
948
YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
949
                          help="Destroy cluster", action="store_true")
950

    
951
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
952
                          help="Skip node agreement check (dangerous)",
953
                          action="store_true", default=False)
954

    
955
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
956
                            help="Specify the mac prefix for the instance IP"
957
                            " addresses, in the format XX:XX:XX",
958
                            metavar="PREFIX",
959
                            default=None)
960

    
961
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
962
                               help="Specify the node interface (cluster-wide)"
963
                               " on which the master IP address will be added"
964
                               " (cluster init default: %s)" %
965
                               constants.DEFAULT_BRIDGE,
966
                               metavar="NETDEV",
967
                               default=None)
968

    
969
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
970
                                help="Specify the default directory (cluster-"
971
                                "wide) for storing the file-based disks [%s]" %
972
                                constants.DEFAULT_FILE_STORAGE_DIR,
973
                                metavar="DIR",
974
                                default=constants.DEFAULT_FILE_STORAGE_DIR)
975

    
976
GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
977
                            dest="shared_file_storage_dir",
978
                            help="Specify the default directory (cluster-"
979
                            "wide) for storing the shared file-based"
980
                            " disks [%s]" %
981
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
982
                            metavar="SHAREDDIR",
983
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
984

    
985
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
986
                                   help="Don't modify /etc/hosts",
987
                                   action="store_false", default=True)
988

    
989
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
990
                                    help="Don't initialize SSH keys",
991
                                    action="store_false", default=True)
992

    
993
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
994
                             help="Enable parseable error messages",
995
                             action="store_true", default=False)
996

    
997
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
998
                          help="Skip N+1 memory redundancy tests",
999
                          action="store_true", default=False)
1000

    
1001
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1002
                             help="Type of reboot: soft/hard/full",
1003
                             default=constants.INSTANCE_REBOOT_HARD,
1004
                             metavar="<REBOOT>",
1005
                             choices=list(constants.REBOOT_TYPES))
1006

    
1007
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1008
                                    dest="ignore_secondaries",
1009
                                    default=False, action="store_true",
1010
                                    help="Ignore errors from secondaries")
1011

    
1012
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1013
                            action="store_false", default=True,
1014
                            help="Don't shutdown the instance (unsafe)")
1015

    
1016
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1017
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1018
                         help="Maximum time to wait")
1019

    
1020
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1021
                         dest="shutdown_timeout", type="int",
1022
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1023
                         help="Maximum time to wait for instance shutdown")
1024

    
1025
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1026
                          default=None,
1027
                          help=("Number of seconds between repetions of the"
1028
                                " command"))
1029

    
1030
EARLY_RELEASE_OPT = cli_option("--early-release",
1031
                               dest="early_release", default=False,
1032
                               action="store_true",
1033
                               help="Release the locks on the secondary"
1034
                               " node(s) early")
1035

    
1036
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1037
                                  dest="new_cluster_cert",
1038
                                  default=False, action="store_true",
1039
                                  help="Generate a new cluster certificate")
1040

    
1041
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1042
                           default=None,
1043
                           help="File containing new RAPI certificate")
1044

    
1045
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1046
                               default=None, action="store_true",
1047
                               help=("Generate a new self-signed RAPI"
1048
                                     " certificate"))
1049

    
1050
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1051
                                    dest="new_confd_hmac_key",
1052
                                    default=False, action="store_true",
1053
                                    help=("Create a new HMAC key for %s" %
1054
                                          constants.CONFD))
1055

    
1056
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1057
                                       dest="cluster_domain_secret",
1058
                                       default=None,
1059
                                       help=("Load new new cluster domain"
1060
                                             " secret from file"))
1061

    
1062
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1063
                                           dest="new_cluster_domain_secret",
1064
                                           default=False, action="store_true",
1065
                                           help=("Create a new cluster domain"
1066
                                                 " secret"))
1067

    
1068
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1069
                              dest="use_replication_network",
1070
                              help="Whether to use the replication network"
1071
                              " for talking to the nodes",
1072
                              action="store_true", default=False)
1073

    
1074
MAINTAIN_NODE_HEALTH_OPT = \
1075
    cli_option("--maintain-node-health", dest="maintain_node_health",
1076
               metavar=_YORNO, default=None, type="bool",
1077
               help="Configure the cluster to automatically maintain node"
1078
               " health, by shutting down unknown instances, shutting down"
1079
               " unknown DRBD devices, etc.")
1080

    
1081
IDENTIFY_DEFAULTS_OPT = \
1082
    cli_option("--identify-defaults", dest="identify_defaults",
1083
               default=False, action="store_true",
1084
               help="Identify which saved instance parameters are equal to"
1085
               " the current cluster defaults and set them as such, instead"
1086
               " of marking them as overridden")
1087

    
1088
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1089
                         action="store", dest="uid_pool",
1090
                         help=("A list of user-ids or user-id"
1091
                               " ranges separated by commas"))
1092

    
1093
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1094
                          action="store", dest="add_uids",
1095
                          help=("A list of user-ids or user-id"
1096
                                " ranges separated by commas, to be"
1097
                                " added to the user-id pool"))
1098

    
1099
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1100
                             action="store", dest="remove_uids",
1101
                             help=("A list of user-ids or user-id"
1102
                                   " ranges separated by commas, to be"
1103
                                   " removed from the user-id pool"))
1104

    
1105
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1106
                             action="store", dest="reserved_lvs",
1107
                             help=("A comma-separated list of reserved"
1108
                                   " logical volumes names, that will be"
1109
                                   " ignored by cluster verify"))
1110

    
1111
ROMAN_OPT = cli_option("--roman",
1112
                       dest="roman_integers", default=False,
1113
                       action="store_true",
1114
                       help="Use roman numbers for positive integers")
1115

    
1116
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1117
                             action="store", default=None,
1118
                             help="Specifies usermode helper for DRBD")
1119

    
1120
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1121
                                action="store_false", default=True,
1122
                                help="Disable support for DRBD")
1123

    
1124
PRIMARY_IP_VERSION_OPT = \
1125
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1126
               action="store", dest="primary_ip_version",
1127
               metavar="%d|%d" % (constants.IP4_VERSION,
1128
                                  constants.IP6_VERSION),
1129
               help="Cluster-wide IP version for primary IP")
1130

    
1131
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1132
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1133
                          choices=_PRIONAME_TO_VALUE.keys(),
1134
                          help="Priority for opcode processing")
1135

    
1136
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1137
                        type="bool", default=None, metavar=_YORNO,
1138
                        help="Sets the hidden flag on the OS")
1139

    
1140
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1141
                        type="bool", default=None, metavar=_YORNO,
1142
                        help="Sets the blacklisted flag on the OS")
1143

    
1144
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1145
                                     type="bool", metavar=_YORNO,
1146
                                     dest="prealloc_wipe_disks",
1147
                                     help=("Wipe disks prior to instance"
1148
                                           " creation"))
1149

    
1150
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1151
                             type="keyval", default=None,
1152
                             help="Node parameters")
1153

    
1154
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1155
                              action="store", metavar="POLICY", default=None,
1156
                              help="Allocation policy for the node group")
1157

    
1158
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1159
                              type="bool", metavar=_YORNO,
1160
                              dest="node_powered",
1161
                              help="Specify if the SoR for node is powered")
1162

    
1163
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1164
                         default=constants.OOB_TIMEOUT,
1165
                         help="Maximum time to wait for out-of-band helper")
1166

    
1167

    
1168
#: Options provided by all commands
1169
COMMON_OPTS = [DEBUG_OPT]
1170

    
1171
# Common options for creating instances. The "add" and "import" commands then
1172
# add their own specific ones.
1173
COMMON_CREATE_OPTS = [
1174
  BACKEND_OPT,
1175
  DISK_OPT,
1176
  DISK_TEMPLATE_OPT,
1177
  FILESTORE_DIR_OPT,
1178
  FILESTORE_DRIVER_OPT,
1179
  HYPERVISOR_OPT,
1180
  IALLOCATOR_OPT,
1181
  NET_OPT,
1182
  NODE_PLACEMENT_OPT,
1183
  NOIPCHECK_OPT,
1184
  NONAMECHECK_OPT,
1185
  NONICS_OPT,
1186
  NWSYNC_OPT,
1187
  OSPARAMS_OPT,
1188
  OS_SIZE_OPT,
1189
  SUBMIT_OPT,
1190
  DRY_RUN_OPT,
1191
  PRIORITY_OPT,
1192
  ]
1193

    
1194

    
1195
def _ParseArgs(argv, commands, aliases):
1196
  """Parser for the command line arguments.
1197

1198
  This function parses the arguments and returns the function which
1199
  must be executed together with its (modified) arguments.
1200

1201
  @param argv: the command line
1202
  @param commands: dictionary with special contents, see the design
1203
      doc for cmdline handling
1204
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
1205

1206
  """
1207
  if len(argv) == 0:
1208
    binary = "<command>"
1209
  else:
1210
    binary = argv[0].split("/")[-1]
1211

    
1212
  if len(argv) > 1 and argv[1] == "--version":
1213
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1214
             constants.RELEASE_VERSION)
1215
    # Quit right away. That way we don't have to care about this special
1216
    # argument. optparse.py does it the same.
1217
    sys.exit(0)
1218

    
1219
  if len(argv) < 2 or not (argv[1] in commands or
1220
                           argv[1] in aliases):
1221
    # let's do a nice thing
1222
    sortedcmds = commands.keys()
1223
    sortedcmds.sort()
1224

    
1225
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1226
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1227
    ToStdout("")
1228

    
1229
    # compute the max line length for cmd + usage
1230
    mlen = max([len(" %s" % cmd) for cmd in commands])
1231
    mlen = min(60, mlen) # should not get here...
1232

    
1233
    # and format a nice command list
1234
    ToStdout("Commands:")
1235
    for cmd in sortedcmds:
1236
      cmdstr = " %s" % (cmd,)
1237
      help_text = commands[cmd][4]
1238
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1239
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1240
      for line in help_lines:
1241
        ToStdout("%-*s   %s", mlen, "", line)
1242

    
1243
    ToStdout("")
1244

    
1245
    return None, None, None
1246

    
1247
  # get command, unalias it, and look it up in commands
1248
  cmd = argv.pop(1)
1249
  if cmd in aliases:
1250
    if cmd in commands:
1251
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
1252
                                   " command" % cmd)
1253

    
1254
    if aliases[cmd] not in commands:
1255
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1256
                                   " command '%s'" % (cmd, aliases[cmd]))
1257

    
1258
    cmd = aliases[cmd]
1259

    
1260
  func, args_def, parser_opts, usage, description = commands[cmd]
1261
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1262
                        description=description,
1263
                        formatter=TitledHelpFormatter(),
1264
                        usage="%%prog %s %s" % (cmd, usage))
1265
  parser.disable_interspersed_args()
1266
  options, args = parser.parse_args()
1267

    
1268
  if not _CheckArguments(cmd, args_def, args):
1269
    return None, None, None
1270

    
1271
  return func, options, args
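
# Illustrative sketch of the "commands"/"aliases" structures expected here
# (the handler and command names below are hypothetical; the real tables are
# defined by the client scripts): each value is a 5-tuple of handler function,
# argument definition, extra options, usage string and description.
#
#   commands = {
#     "info": (ShowInfo, ARGS_MANY_INSTANCES, [STATIC_OPT, ALL_OPT],
#              "[-s] [--all] [<instance>...]", "Show instance information"),
#     }
#   aliases = {"show": "info"}
#   func, options, args = _ParseArgs(sys.argv, commands, aliases)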
1272

    
1273

    
1274
def _CheckArguments(cmd, args_def, args):
1275
  """Verifies the arguments using the argument definition.
1276

1277
  Algorithm:
1278

1279
    1. Abort with error if values specified by user but none expected.
1280

1281
    1. For each argument in definition
1282

1283
      1. Keep running count of minimum number of values (min_count)
1284
      1. Keep running count of maximum number of values (max_count)
1285
      1. If it has an unlimited number of values
1286

1287
        1. Abort with error if it's not the last argument in the definition
1288

1289
    1. If last argument has limited number of values
1290

1291
      1. Abort with error if number of values doesn't match or is too large
1292

1293
    1. Abort with error if user didn't pass enough values (min_count)
1294

1295
  """
1296
  if args and not args_def:
1297
    ToStderr("Error: Command %s expects no arguments", cmd)
1298
    return False
1299

    
1300
  min_count = None
1301
  max_count = None
1302
  check_max = None
1303

    
1304
  last_idx = len(args_def) - 1
1305

    
1306
  for idx, arg in enumerate(args_def):
1307
    if min_count is None:
1308
      min_count = arg.min
1309
    elif arg.min is not None:
1310
      min_count += arg.min
1311

    
1312
    if max_count is None:
1313
      max_count = arg.max
1314
    elif arg.max is not None:
1315
      max_count += arg.max
1316

    
1317
    if idx == last_idx:
1318
      check_max = (arg.max is not None)
1319

    
1320
    elif arg.max is None:
1321
      raise errors.ProgrammerError("Only the last argument can have max=None")
1322

    
1323
  if check_max:
1324
    # Command with exact number of arguments
1325
    if (min_count is not None and max_count is not None and
1326
        min_count == max_count and len(args) != min_count):
1327
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1328
      return False
1329

    
1330
    # Command with limited number of arguments
1331
    if max_count is not None and len(args) > max_count:
1332
      ToStderr("Error: Command %s expects only %d argument(s)",
1333
               cmd, max_count)
1334
      return False
1335

    
1336
  # Command with some required arguments
1337
  if min_count is not None and len(args) < min_count:
1338
    ToStderr("Error: Command %s expects at least %d argument(s)",
1339
             cmd, min_count)
1340
    return False
1341

    
1342
  return True
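
# Illustrative sketch using the argument definitions above:
#
#   >>> _CheckArguments("modify", ARGS_ONE_INSTANCE, ["inst1.example.com"])
#   True
#   >>> _CheckArguments("modify", ARGS_ONE_INSTANCE, [])   # prints an error
#   False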
1343

    
1344

    
1345
def SplitNodeOption(value):
1346
  """Splits the value of a --node option.
1347

1348
  """
1349
  if value and ':' in value:
1350
    return value.split(':', 1)
1351
  else:
1352
    return (value, None)
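
# Illustrative sketch: both forms yield a primary/secondary pair (note that
# the first form returns a list, the second a tuple), with None standing in
# for a missing secondary node.
#
#   >>> SplitNodeOption("node1.example.com:node2.example.com")
#   ['node1.example.com', 'node2.example.com']
#   >>> SplitNodeOption("node1.example.com")
#   ('node1.example.com', None)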
1353

    
1354

    
1355
def CalculateOSNames(os_name, os_variants):
1356
  """Calculates all the names an OS can be called, according to its variants.
1357

1358
  @type os_name: string
1359
  @param os_name: base name of the os
1360
  @type os_variants: list or None
1361
  @param os_variants: list of supported variants
1362
  @rtype: list
1363
  @return: list of valid names
1364

1365
  """
1366
  if os_variants:
1367
    return ['%s+%s' % (os_name, v) for v in os_variants]
1368
  else:
1369
    return [os_name]
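
# Illustrative sketch (hypothetical OS and variant names):
#
#   >>> CalculateOSNames("debian-image", ["lenny", "squeeze"])
#   ['debian-image+lenny', 'debian-image+squeeze']
#   >>> CalculateOSNames("debian-image", None)
#   ['debian-image']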
1370

    
1371

    
1372
def ParseFields(selected, default):
1373
  """Parses the values of "--field"-like options.
1374

1375
  @type selected: string or None
1376
  @param selected: User-selected options
1377
  @type default: list
1378
  @param default: Default fields
1379

1380
  """
1381
  if selected is None:
1382
    return default
1383

    
1384
  if selected.startswith("+"):
1385
    return default + selected[1:].split(",")
1386

    
1387
  return selected.split(",")
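
# Illustrative examples, assuming default = ["name", "status"] (the field
# names are only placeholders):
#   ParseFields(None, default)        -> ["name", "status"]
#   ParseFields("+oper_ram", default) -> ["name", "status", "oper_ram"]
#   ParseFields("name,ip", default)   -> ["name", "ip"]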
1388

    
1389

    
1390
UsesRPC = rpc.RunWithRPC
1391

    
1392

    
1393
def AskUser(text, choices=None):
1394
  """Ask the user a question.
1395

1396
  @param text: the question to ask
1397

1398
  @param choices: list of tuples (input_char, return_value,
1399
      description); if not given, it will default to: [('y', True,
1400
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1401
      note that the '?' char is reserved for help
1402

1403
  @return: one of the return values from the choices list; if input is
1404
      not possible (i.e. not running with a tty), we return the last
1405
      entry from the list
1406

1407
  """
1408
  if choices is None:
1409
    choices = [('y', True, 'Perform the operation'),
1410
               ('n', False, 'Do not perform the operation')]
1411
  if not choices or not isinstance(choices, list):
1412
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1413
  for entry in choices:
1414
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1415
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1416

    
1417
  answer = choices[-1][1]
1418
  new_text = []
1419
  for line in text.splitlines():
1420
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1421
  text = "\n".join(new_text)
1422
  try:
1423
    f = file("/dev/tty", "a+")
1424
  except IOError:
1425
    return answer
1426
  try:
1427
    chars = [entry[0] for entry in choices]
1428
    chars[-1] = "[%s]" % chars[-1]
1429
    chars.append('?')
1430
    maps = dict([(entry[0], entry[1]) for entry in choices])
1431
    while True:
1432
      f.write(text)
1433
      f.write('\n')
1434
      f.write("/".join(chars))
1435
      f.write(": ")
1436
      line = f.readline(2).strip().lower()
1437
      if line in maps:
1438
        answer = maps[line]
1439
        break
1440
      elif line == '?':
1441
        for entry in choices:
1442
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1443
        f.write("\n")
1444
        continue
1445
  finally:
1446
    f.close()
1447
  return answer
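
# Sketch of a typical call (hypothetical instance name); with the default
# choices the answer is True only if the operator types 'y', and False when
# no tty is available:
#
#   if not AskUser("Remove instance inst1.example.com?"):
#     return constants.EXIT_FAILURE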
1448

    
1449

    
1450
class JobSubmittedException(Exception):
1451
  """Job was submitted, client should exit.
1452

1453
  This exception has one argument, the ID of the job that was
1454
  submitted. The handler should print this ID.
1455

1456
  This is not an error, just a structured way to exit from clients.
1457

1458
  """
1459

    
1460

    
1461
def SendJob(ops, cl=None):
1462
  """Function to submit an opcode without waiting for the results.
1463

1464
  @type ops: list
1465
  @param ops: list of opcodes
1466
  @type cl: luxi.Client
1467
  @param cl: the luxi client to use for communicating with the master;
1468
             if None, a new client will be created
1469

1470
  """
1471
  if cl is None:
1472
    cl = GetClient()
1473

    
1474
  job_id = cl.SubmitJob(ops)
1475

    
1476
  return job_id
1477

    
1478

    
1479
def GenericPollJob(job_id, cbs, report_cbs):
1480
  """Generic job-polling function.
1481

1482
  @type job_id: number
1483
  @param job_id: Job ID
1484
  @type cbs: Instance of L{JobPollCbBase}
1485
  @param cbs: Data callbacks
1486
  @type report_cbs: Instance of L{JobPollReportCbBase}
1487
  @param report_cbs: Reporting callbacks
1488

1489
  """
1490
  prev_job_info = None
1491
  prev_logmsg_serial = None
1492

    
1493
  status = None
1494

    
1495
  while True:
1496
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1497
                                      prev_logmsg_serial)
1498
    if not result:
1499
      # job not found, go away!
1500
      raise errors.JobLost("Job with id %s lost" % job_id)
1501

    
1502
    if result == constants.JOB_NOTCHANGED:
1503
      report_cbs.ReportNotChanged(job_id, status)
1504

    
1505
      # Wait again
1506
      continue
1507

    
1508
    # Split result, a tuple of (field values, log entries)
1509
    (job_info, log_entries) = result
1510
    (status, ) = job_info
1511

    
1512
    if log_entries:
1513
      for log_entry in log_entries:
1514
        (serial, timestamp, log_type, message) = log_entry
1515
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1516
                                    log_type, message)
1517
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1518

    
1519
    # TODO: Handle canceled and archived jobs
1520
    elif status in (constants.JOB_STATUS_SUCCESS,
1521
                    constants.JOB_STATUS_ERROR,
1522
                    constants.JOB_STATUS_CANCELING,
1523
                    constants.JOB_STATUS_CANCELED):
1524
      break
1525

    
1526
    prev_job_info = job_info
1527

    
1528
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1529
  if not jobs:
1530
    raise errors.JobLost("Job with id %s lost" % job_id)
1531

    
1532
  status, opstatus, result = jobs[0]
1533

    
1534
  if status == constants.JOB_STATUS_SUCCESS:
1535
    return result
1536

    
1537
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1538
    raise errors.OpExecError("Job was canceled")
1539

    
1540
  has_ok = False
1541
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1542
    if status == constants.OP_STATUS_SUCCESS:
1543
      has_ok = True
1544
    elif status == constants.OP_STATUS_ERROR:
1545
      errors.MaybeRaise(msg)
1546

    
1547
      if has_ok:
1548
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1549
                                 (idx, msg))
1550

    
1551
      raise errors.OpExecError(str(msg))
1552

    
1553
  # default failure mode
1554
  raise errors.OpExecError(result)
1555

    
1556

    
1557
class JobPollCbBase:
1558
  """Base class for L{GenericPollJob} callbacks.
1559

1560
  """
1561
  def __init__(self):
1562
    """Initializes this class.
1563

1564
    """
1565

    
1566
  def WaitForJobChangeOnce(self, job_id, fields,
1567
                           prev_job_info, prev_log_serial):
1568
    """Waits for changes on a job.
1569

1570
    """
1571
    raise NotImplementedError()
1572

    
1573
  def QueryJobs(self, job_ids, fields):
1574
    """Returns the selected fields for the selected job IDs.
1575

1576
    @type job_ids: list of numbers
1577
    @param job_ids: Job IDs
1578
    @type fields: list of strings
1579
    @param fields: Fields
1580

1581
    """
1582
    raise NotImplementedError()
1583

    
1584

    
1585
class JobPollReportCbBase:
1586
  """Base class for L{GenericPollJob} reporting callbacks.
1587

1588
  """
1589
  def __init__(self):
1590
    """Initializes this class.
1591

1592
    """
1593

    
1594
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1595
    """Handles a log message.
1596

1597
    """
1598
    raise NotImplementedError()
1599

    
1600
  def ReportNotChanged(self, job_id, status):
1601
    """Called for if a job hasn't changed in a while.
1602

1603
    @type job_id: number
1604
    @param job_id: Job ID
1605
    @type status: string or None
1606
    @param status: Job status if available
1607

1608
    """
1609
    raise NotImplementedError()
1610

    
1611

    
1612
class _LuxiJobPollCb(JobPollCbBase):
1613
  def __init__(self, cl):
1614
    """Initializes this class.
1615

1616
    """
1617
    JobPollCbBase.__init__(self)
1618
    self.cl = cl
1619

    
1620
  def WaitForJobChangeOnce(self, job_id, fields,
1621
                           prev_job_info, prev_log_serial):
1622
    """Waits for changes on a job.
1623

1624
    """
1625
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1626
                                        prev_job_info, prev_log_serial)
1627

    
1628
  def QueryJobs(self, job_ids, fields):
1629
    """Returns the selected fields for the selected job IDs.
1630

1631
    """
1632
    return self.cl.QueryJobs(job_ids, fields)
1633

    
1634

    
1635
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1636
  def __init__(self, feedback_fn):
1637
    """Initializes this class.
1638

1639
    """
1640
    JobPollReportCbBase.__init__(self)
1641

    
1642
    self.feedback_fn = feedback_fn
1643

    
1644
    assert callable(feedback_fn)
1645

    
1646
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1647
    """Handles a log message.
1648

1649
    """
1650
    self.feedback_fn((timestamp, log_type, log_msg))
1651

    
1652
  def ReportNotChanged(self, job_id, status):
1653
    """Called if a job hasn't changed in a while.
1654

1655
    """
1656
    # Ignore
1657

    
1658

    
1659
class StdioJobPollReportCb(JobPollReportCbBase):
1660
  def __init__(self):
1661
    """Initializes this class.
1662

1663
    """
1664
    JobPollReportCbBase.__init__(self)
1665

    
1666
    self.notified_queued = False
1667
    self.notified_waitlock = False
1668

    
1669
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1670
    """Handles a log message.
1671

1672
    """
1673
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1674
             FormatLogMessage(log_type, log_msg))
1675

    
1676
  def ReportNotChanged(self, job_id, status):
1677
    """Called if a job hasn't changed in a while.
1678

1679
    """
1680
    if status is None:
1681
      return
1682

    
1683
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1684
      ToStderr("Job %s is waiting in queue", job_id)
1685
      self.notified_queued = True
1686

    
1687
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1688
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1689
      self.notified_waitlock = True
1690

    
1691

    
1692
def FormatLogMessage(log_type, log_msg):
1693
  """Formats a job message according to its type.
1694

1695
  """
1696
  if log_type != constants.ELOG_MESSAGE:
1697
    log_msg = str(log_msg)
1698

    
1699
  return utils.SafeEncode(log_msg)
1700

    
1701

    
1702
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1703
  """Function to poll for the result of a job.
1704

1705
  @type job_id: job identifier
1706
  @param job_id: the job to poll for results
1707
  @type cl: luxi.Client
1708
  @param cl: the luxi client to use for communicating with the master;
1709
             if None, a new client will be created
1710

1711
  """
1712
  if cl is None:
1713
    cl = GetClient()
1714

    
1715
  if reporter is None:
1716
    if feedback_fn:
1717
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1718
    else:
1719
      reporter = StdioJobPollReportCb()
1720
  elif feedback_fn:
1721
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1722

    
1723
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
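
# Sketch of the usual submit-and-wait pattern ('op' stands for any opcode
# instance; error handling omitted). Without a feedback function or
# reporter, log messages go to stdout via StdioJobPollReportCb:
#
#   job_id = SendJob([op])
#   results = PollJob(job_id)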
1724

    
1725

    
1726
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1727
  """Legacy function to submit an opcode.
1728

1729
  This is just a simple wrapper over job submission (L{SendJob}) and
1730
  result polling (L{PollJob}). It should be extended to better handle
1731
  feedback and interaction functions.
1732

1733
  """
1734
  if cl is None:
1735
    cl = GetClient()
1736

    
1737
  SetGenericOpcodeOpts([op], opts)
1738

    
1739
  job_id = SendJob([op], cl=cl)
1740

    
1741
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1742
                       reporter=reporter)
1743

    
1744
  return op_results[0]
1745

    
1746

    
1747
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1748
  """Wrapper around SubmitOpCode or SendJob.
1749

1750
  This function will decide, based on the 'opts' parameter, whether to
1751
  submit and wait for the result of the opcode (and return it), or
1752
  whether to just send the job and print its identifier. It is used in
1753
  order to simplify the implementation of the '--submit' option.
1754

1755
  It will also process the opcodes if we're sending them via SendJob
1756
  (otherwise SubmitOpCode does it).
1757

1758
  """
1759
  if opts and opts.submit_only:
1760
    job = [op]
1761
    SetGenericOpcodeOpts(job, opts)
1762
    job_id = SendJob(job, cl=cl)
1763
    raise JobSubmittedException(job_id)
1764
  else:
1765
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
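
# Sketch of the usual call from a gnt-* command function (the opcode shown
# is only an example): with --submit this raises JobSubmittedException,
# whose job ID is printed by GenericMain/FormatError; otherwise it waits
# for and returns the job result.
#
#   op = opcodes.OpInstanceStartup(instance_name=args[0])
#   SubmitOrSend(op, opts)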
1766

    
1767

    
1768
def SetGenericOpcodeOpts(opcode_list, options):
1769
  """Processor for generic options.
1770

1771
  This function updates the given opcodes based on generic command
1772
  line options (like debug, dry-run, etc.).
1773

1774
  @param opcode_list: list of opcodes
1775
  @param options: command line options or None
1776
  @return: None (in-place modification)
1777

1778
  """
1779
  if not options:
1780
    return
1781
  for op in opcode_list:
1782
    op.debug_level = options.debug
1783
    if hasattr(options, "dry_run"):
1784
      op.dry_run = options.dry_run
1785
    if getattr(options, "priority", None) is not None:
1786
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1787

    
1788

    
1789
def GetClient():
1790
  # TODO: Cache object?
1791
  try:
1792
    client = luxi.Client()
1793
  except luxi.NoMasterError:
1794
    ss = ssconf.SimpleStore()
1795

    
1796
    # Try to read ssconf file
1797
    try:
1798
      ss.GetMasterNode()
1799
    except errors.ConfigurationError:
1800
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1801
                                 " not part of a cluster")
1802

    
1803
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1804
    if master != myself:
1805
      raise errors.OpPrereqError("This is not the master node, please connect"
1806
                                 " to node '%s' and rerun the command" %
1807
                                 master)
1808
    raise
1809
  return client
1810

    
1811

    
1812
def FormatError(err):
1813
  """Return a formatted error message for a given error.
1814

1815
  This function takes an exception instance and returns a tuple
1816
  consisting of two values: first, the recommended exit code, and
1817
  second, a string describing the error message (not
1818
  newline-terminated).
1819

1820
  """
1821
  retcode = 1
1822
  obuf = StringIO()
1823
  msg = str(err)
1824
  if isinstance(err, errors.ConfigurationError):
1825
    txt = "Corrupt configuration file: %s" % msg
1826
    logging.error(txt)
1827
    obuf.write(txt + "\n")
1828
    obuf.write("Aborting.")
1829
    retcode = 2
1830
  elif isinstance(err, errors.HooksAbort):
1831
    obuf.write("Failure: hooks execution failed:\n")
1832
    for node, script, out in err.args[0]:
1833
      if out:
1834
        obuf.write("  node: %s, script: %s, output: %s\n" %
1835
                   (node, script, out))
1836
      else:
1837
        obuf.write("  node: %s, script: %s (no output)\n" %
1838
                   (node, script))
1839
  elif isinstance(err, errors.HooksFailure):
1840
    obuf.write("Failure: hooks general failure: %s" % msg)
1841
  elif isinstance(err, errors.ResolverError):
1842
    this_host = netutils.Hostname.GetSysName()
1843
    if err.args[0] == this_host:
1844
      msg = "Failure: can't resolve my own hostname ('%s')"
1845
    else:
1846
      msg = "Failure: can't resolve hostname '%s'"
1847
    obuf.write(msg % err.args[0])
1848
  elif isinstance(err, errors.OpPrereqError):
1849
    if len(err.args) == 2:
1850
      obuf.write("Failure: prerequisites not met for this"
1851
               " operation:\nerror type: %s, error details:\n%s" %
1852
                 (err.args[1], err.args[0]))
1853
    else:
1854
      obuf.write("Failure: prerequisites not met for this"
1855
                 " operation:\n%s" % msg)
1856
  elif isinstance(err, errors.OpExecError):
1857
    obuf.write("Failure: command execution error:\n%s" % msg)
1858
  elif isinstance(err, errors.TagError):
1859
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1860
  elif isinstance(err, errors.JobQueueDrainError):
1861
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1862
               " accept new requests\n")
1863
  elif isinstance(err, errors.JobQueueFull):
1864
    obuf.write("Failure: the job queue is full and doesn't accept new"
1865
               " job submissions until old jobs are archived\n")
1866
  elif isinstance(err, errors.TypeEnforcementError):
1867
    obuf.write("Parameter Error: %s" % msg)
1868
  elif isinstance(err, errors.ParameterError):
1869
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1870
  elif isinstance(err, luxi.NoMasterError):
1871
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1872
               " and listening for connections?")
1873
  elif isinstance(err, luxi.TimeoutError):
1874
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1875
               " been submitted and will continue to run even if the call"
1876
               " timed out. Useful commands in this situation are \"gnt-job"
1877
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1878
    obuf.write(msg)
1879
  elif isinstance(err, luxi.PermissionError):
1880
    obuf.write("It seems you don't have permissions to connect to the"
1881
               " master daemon.\nPlease retry as a different user.")
1882
  elif isinstance(err, luxi.ProtocolError):
1883
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1884
               "%s" % msg)
1885
  elif isinstance(err, errors.JobLost):
1886
    obuf.write("Error checking job status: %s" % msg)
1887
  elif isinstance(err, errors.GenericError):
1888
    obuf.write("Unhandled Ganeti error: %s" % msg)
1889
  elif isinstance(err, JobSubmittedException):
1890
    obuf.write("JobID: %s\n" % err.args[0])
1891
    retcode = 0
1892
  else:
1893
    obuf.write("Unhandled exception: %s" % msg)
1894
  return retcode, obuf.getvalue().rstrip('\n')
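
# Illustrative example (invented error text):
#   FormatError(errors.OpExecError("disk sync failed"))
#     -> (1, "Failure: command execution error:\ndisk sync failed")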
1895

    
1896

    
1897
def GenericMain(commands, override=None, aliases=None):
1898
  """Generic main function for all the gnt-* commands.
1899

1900
  Arguments:
1901
    - commands: a dictionary with a special structure, see the design doc
1902
                for command line handling.
1903
    - override: if not None, we expect a dictionary with keys that will
1904
                override command line options; this can be used to pass
1905
                options from the scripts to generic functions
1906
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1907

1908
  """
1909
  # save the program name and the entire command line for later logging
1910
  if sys.argv:
1911
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1912
    if len(sys.argv) >= 2:
1913
      binary += " " + sys.argv[1]
1914
      old_cmdline = " ".join(sys.argv[2:])
1915
    else:
1916
      old_cmdline = ""
1917
  else:
1918
    binary = "<unknown program>"
1919
    old_cmdline = ""
1920

    
1921
  if aliases is None:
1922
    aliases = {}
1923

    
1924
  try:
1925
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1926
  except errors.ParameterError, err:
1927
    result, err_msg = FormatError(err)
1928
    ToStderr(err_msg)
1929
    return 1
1930

    
1931
  if func is None: # parse error
1932
    return 1
1933

    
1934
  if override is not None:
1935
    for key, val in override.iteritems():
1936
      setattr(options, key, val)
1937

    
1938
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
1939
                     stderr_logging=True)
1940

    
1941
  if old_cmdline:
1942
    logging.info("run with arguments '%s'", old_cmdline)
1943
  else:
1944
    logging.info("run with no arguments")
1945

    
1946
  try:
1947
    result = func(options, args)
1948
  except (errors.GenericError, luxi.ProtocolError,
1949
          JobSubmittedException), err:
1950
    result, err_msg = FormatError(err)
1951
    logging.exception("Error during command processing")
1952
    ToStderr(err_msg)
1953
  except KeyboardInterrupt:
1954
    result = constants.EXIT_FAILURE
1955
    ToStderr("Aborted. Note that if the operation created any jobs, they"
1956
             " might have been submitted and"
1957
             " will continue to run in the background.")
1958

    
1959
  return result
1960

    
1961

    
1962
def ParseNicOption(optvalue):
1963
  """Parses the value of the --net option(s).
1964

1965
  """
1966
  try:
1967
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1968
  except (TypeError, ValueError), err:
1969
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1970

    
1971
  nics = [{}] * nic_max
1972
  for nidx, ndict in optvalue:
1973
    nidx = int(nidx)
1974

    
1975
    if not isinstance(ndict, dict):
1976
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1977
                                 " got %s" % (nidx, ndict))
1978

    
1979
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1980

    
1981
    nics[nidx] = ndict
1982

    
1983
  return nics
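
# Illustrative example (parameter values are made up): input of the form
# produced by the --net option parser,
#   [("0", {"link": "br0"}), ("1", {"mode": "routed"})]
# is turned into a dense list indexed by NIC number:
#   [{"link": "br0"}, {"mode": "routed"}]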
1984

    
1985

    
1986
def GenericInstanceCreate(mode, opts, args):
1987
  """Add an instance to the cluster via either creation or import.
1988

1989
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1990
  @param opts: the command line options selected by the user
1991
  @type args: list
1992
  @param args: should contain only one element, the new instance name
1993
  @rtype: int
1994
  @return: the desired exit code
1995

1996
  """
1997
  instance = args[0]
1998

    
1999
  (pnode, snode) = SplitNodeOption(opts.node)
2000

    
2001
  hypervisor = None
2002
  hvparams = {}
2003
  if opts.hypervisor:
2004
    hypervisor, hvparams = opts.hypervisor
2005

    
2006
  if opts.nics:
2007
    nics = ParseNicOption(opts.nics)
2008
  elif opts.no_nics:
2009
    # no nics
2010
    nics = []
2011
  elif mode == constants.INSTANCE_CREATE:
2012
    # default of one nic, all auto
2013
    nics = [{}]
2014
  else:
2015
    # mode == import
2016
    nics = []
2017

    
2018
  if opts.disk_template == constants.DT_DISKLESS:
2019
    if opts.disks or opts.sd_size is not None:
2020
      raise errors.OpPrereqError("Diskless instance but disk"
2021
                                 " information passed")
2022
    disks = []
2023
  else:
2024
    if (not opts.disks and not opts.sd_size
2025
        and mode == constants.INSTANCE_CREATE):
2026
      raise errors.OpPrereqError("No disk information specified")
2027
    if opts.disks and opts.sd_size is not None:
2028
      raise errors.OpPrereqError("Please use either the '--disk' or"
2029
                                 " '-s' option")
2030
    if opts.sd_size is not None:
2031
      opts.disks = [(0, {"size": opts.sd_size})]
2032

    
2033
    if opts.disks:
2034
      try:
2035
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2036
      except ValueError, err:
2037
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2038
      disks = [{}] * disk_max
2039
    else:
2040
      disks = []
2041
    for didx, ddict in opts.disks:
2042
      didx = int(didx)
2043
      if not isinstance(ddict, dict):
2044
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2045
        raise errors.OpPrereqError(msg)
2046
      elif "size" in ddict:
2047
        if "adopt" in ddict:
2048
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2049
                                     " (disk %d)" % didx)
2050
        try:
2051
          ddict["size"] = utils.ParseUnit(ddict["size"])
2052
        except ValueError, err:
2053
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2054
                                     (didx, err))
2055
      elif "adopt" in ddict:
2056
        if mode == constants.INSTANCE_IMPORT:
2057
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2058
                                     " import")
2059
        ddict["size"] = 0
2060
      else:
2061
        raise errors.OpPrereqError("Missing size or adoption source for"
2062
                                   " disk %d" % didx)
2063
      disks[didx] = ddict
2064

    
2065
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2066
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2067

    
2068
  if mode == constants.INSTANCE_CREATE:
2069
    start = opts.start
2070
    os_type = opts.os
2071
    force_variant = opts.force_variant
2072
    src_node = None
2073
    src_path = None
2074
    no_install = opts.no_install
2075
    identify_defaults = False
2076
  elif mode == constants.INSTANCE_IMPORT:
2077
    start = False
2078
    os_type = None
2079
    force_variant = False
2080
    src_node = opts.src_node
2081
    src_path = opts.src_dir
2082
    no_install = None
2083
    identify_defaults = opts.identify_defaults
2084
  else:
2085
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2086

    
2087
  op = opcodes.OpInstanceCreate(instance_name=instance,
2088
                                disks=disks,
2089
                                disk_template=opts.disk_template,
2090
                                nics=nics,
2091
                                pnode=pnode, snode=snode,
2092
                                ip_check=opts.ip_check,
2093
                                name_check=opts.name_check,
2094
                                wait_for_sync=opts.wait_for_sync,
2095
                                file_storage_dir=opts.file_storage_dir,
2096
                                file_driver=opts.file_driver,
2097
                                iallocator=opts.iallocator,
2098
                                hypervisor=hypervisor,
2099
                                hvparams=hvparams,
2100
                                beparams=opts.beparams,
2101
                                osparams=opts.osparams,
2102
                                mode=mode,
2103
                                start=start,
2104
                                os_type=os_type,
2105
                                force_variant=force_variant,
2106
                                src_node=src_node,
2107
                                src_path=src_path,
2108
                                no_install=no_install,
2109
                                identify_defaults=identify_defaults)
2110

    
2111
  SubmitOrSend(op, opts)
2112
  return 0
2113

    
2114

    
2115
class _RunWhileClusterStoppedHelper:
2116
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2117

2118
  """
2119
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2120
    """Initializes this class.
2121

2122
    @type feedback_fn: callable
2123
    @param feedback_fn: Feedback function
2124
    @type cluster_name: string
2125
    @param cluster_name: Cluster name
2126
    @type master_node: string
2127
    @param master_node: Master node name
2128
    @type online_nodes: list
2129
    @param online_nodes: List of names of online nodes
2130

2131
    """
2132
    self.feedback_fn = feedback_fn
2133
    self.cluster_name = cluster_name
2134
    self.master_node = master_node
2135
    self.online_nodes = online_nodes
2136

    
2137
    self.ssh = ssh.SshRunner(self.cluster_name)
2138

    
2139
    self.nonmaster_nodes = [name for name in online_nodes
2140
                            if name != master_node]
2141

    
2142
    assert self.master_node not in self.nonmaster_nodes
2143

    
2144
  def _RunCmd(self, node_name, cmd):
2145
    """Runs a command on the local or a remote machine.
2146

2147
    @type node_name: string
2148
    @param node_name: Machine name
2149
    @type cmd: list
2150
    @param cmd: Command
2151

2152
    """
2153
    if node_name is None or node_name == self.master_node:
2154
      # No need to use SSH
2155
      result = utils.RunCmd(cmd)
2156
    else:
2157
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2158

    
2159
    if result.failed:
2160
      errmsg = ["Failed to run command %s" % result.cmd]
2161
      if node_name:
2162
        errmsg.append("on node %s" % node_name)
2163
      errmsg.append(": exitcode %s and error %s" %
2164
                    (result.exit_code, result.output))
2165
      raise errors.OpExecError(" ".join(errmsg))
2166

    
2167
  def Call(self, fn, *args):
2168
    """Call function while all daemons are stopped.
2169

2170
    @type fn: callable
2171
    @param fn: Function to be called
2172

2173
    """
2174
    # Pause watcher by acquiring an exclusive lock on watcher state file
2175
    self.feedback_fn("Blocking watcher")
2176
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2177
    try:
2178
      # TODO: Currently, this just blocks. There's no timeout.
2179
      # TODO: Should it be a shared lock?
2180
      watcher_block.Exclusive(blocking=True)
2181

    
2182
      # Stop master daemons, so that no new jobs can come in and all running
2183
      # ones are finished
2184
      self.feedback_fn("Stopping master daemons")
2185
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2186
      try:
2187
        # Stop daemons on all nodes
2188
        for node_name in self.online_nodes:
2189
          self.feedback_fn("Stopping daemons on %s" % node_name)
2190
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2191

    
2192
        # All daemons are shut down now
2193
        try:
2194
          return fn(self, *args)
2195
        except Exception, err:
2196
          _, errmsg = FormatError(err)
2197
          logging.exception("Caught exception")
2198
          self.feedback_fn(errmsg)
2199
          raise
2200
      finally:
2201
        # Start cluster again, master node last
2202
        for node_name in self.nonmaster_nodes + [self.master_node]:
2203
          self.feedback_fn("Starting daemons on %s" % node_name)
2204
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2205
    finally:
2206
      # Resume watcher
2207
      watcher_block.Close()
2208

    
2209

    
2210
def RunWhileClusterStopped(feedback_fn, fn, *args):
2211
  """Calls a function while all cluster daemons are stopped.
2212

2213
  @type feedback_fn: callable
2214
  @param feedback_fn: Feedback function
2215
  @type fn: callable
2216
  @param fn: Function to be called when daemons are stopped
2217

2218
  """
2219
  feedback_fn("Gathering cluster information")
2220

    
2221
  # This ensures we're running on the master daemon
2222
  cl = GetClient()
2223

    
2224
  (cluster_name, master_node) = \
2225
    cl.QueryConfigValues(["cluster_name", "master_node"])
2226

    
2227
  online_nodes = GetOnlineNodes([], cl=cl)
2228

    
2229
  # Don't keep a reference to the client. The master daemon will go away.
2230
  del cl
2231

    
2232
  assert master_node in online_nodes
2233

    
2234
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2235
                                       online_nodes).Call(fn, *args)
2236

    
2237

    
2238
def GenerateTable(headers, fields, separator, data,
2239
                  numfields=None, unitfields=None,
2240
                  units=None):
2241
  """Prints a table with headers and different fields.
2242

2243
  @type headers: dict
2244
  @param headers: dictionary mapping field names to headers for
2245
      the table
2246
  @type fields: list
2247
  @param fields: the field names corresponding to each row in
2248
      the data field
2249
  @param separator: the separator to be used; if this is None,
2250
      the default 'smart' algorithm is used which computes optimal
2251
      field width, otherwise just the separator is used between
2252
      each field
2253
  @type data: list
2254
  @param data: a list of lists, each sublist being one row to be output
2255
  @type numfields: list
2256
  @param numfields: a list with the fields that hold numeric
2257
      values and thus should be right-aligned
2258
  @type unitfields: list
2259
  @param unitfields: a list with the fields that hold numeric
2260
      values that should be formatted with the units field
2261
  @type units: string or None
2262
  @param units: the units we should use for formatting, or None for
2263
      automatic choice (human-readable for non-separator usage, otherwise
2264
      megabytes); this is a one-letter string
2265

2266
  """
2267
  if units is None:
2268
    if separator:
2269
      units = "m"
2270
    else:
2271
      units = "h"
2272

    
2273
  if numfields is None:
2274
    numfields = []
2275
  if unitfields is None:
2276
    unitfields = []
2277

    
2278
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2279
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2280

    
2281
  format_fields = []
2282
  for field in fields:
2283
    if headers and field not in headers:
2284
      # TODO: handle better unknown fields (either revert to old
2285
      # style of raising exception, or deal more intelligently with
2286
      # variable fields)
2287
      headers[field] = field
2288
    if separator is not None:
2289
      format_fields.append("%s")
2290
    elif numfields.Matches(field):
2291
      format_fields.append("%*s")
2292
    else:
2293
      format_fields.append("%-*s")
2294

    
2295
  if separator is None:
2296
    mlens = [0 for name in fields]
2297
    format_str = ' '.join(format_fields)
2298
  else:
2299
    format_str = separator.replace("%", "%%").join(format_fields)
2300

    
2301
  for row in data:
2302
    if row is None:
2303
      continue
2304
    for idx, val in enumerate(row):
2305
      if unitfields.Matches(fields[idx]):
2306
        try:
2307
          val = int(val)
2308
        except (TypeError, ValueError):
2309
          pass
2310
        else:
2311
          val = row[idx] = utils.FormatUnit(val, units)
2312
      val = row[idx] = str(val)
2313
      if separator is None:
2314
        mlens[idx] = max(mlens[idx], len(val))
2315

    
2316
  result = []
2317
  if headers:
2318
    args = []
2319
    for idx, name in enumerate(fields):
2320
      hdr = headers[name]
2321
      if separator is None:
2322
        mlens[idx] = max(mlens[idx], len(hdr))
2323
        args.append(mlens[idx])
2324
      args.append(hdr)
2325
    result.append(format_str % tuple(args))
2326

    
2327
  if separator is None:
2328
    assert len(mlens) == len(fields)
2329

    
2330
    if fields and not numfields.Matches(fields[-1]):
2331
      mlens[-1] = 0
2332

    
2333
  for line in data:
2334
    args = []
2335
    if line is None:
2336
      line = ['-' for _ in fields]
2337
    for idx in range(len(fields)):
2338
      if separator is None:
2339
        args.append(mlens[idx])
2340
      args.append(line[idx])
2341
    result.append(format_str % tuple(args))
2342

    
2343
  return result
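
# Illustrative example (hypothetical fields and data): with no separator the
# units default to "h", so unit fields are printed human-readable and
# numeric fields are right-aligned:
#
#   GenerateTable({"name": "Node", "dtotal": "DTotal"}, ["name", "dtotal"],
#                 None, [["node1.example.com", 102400]],
#                 numfields=["dtotal"], unitfields=["dtotal"])
#
# would render 102400 (MiB) as "100.0G" in the "DTotal" column.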
2344

    
2345

    
2346
def _FormatBool(value):
2347
  """Formats a boolean value as a string.
2348

2349
  """
2350
  if value:
2351
    return "Y"
2352
  return "N"
2353

    
2354

    
2355
#: Default formatting for query results; (callback, align right)
2356
_DEFAULT_FORMAT_QUERY = {
2357
  constants.QFT_TEXT: (str, False),
2358
  constants.QFT_BOOL: (_FormatBool, False),
2359
  constants.QFT_NUMBER: (str, True),
2360
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2361
  constants.QFT_OTHER: (str, False),
2362
  constants.QFT_UNKNOWN: (str, False),
2363
  }
2364

    
2365

    
2366
def _GetColumnFormatter(fdef, override, unit):
2367
  """Returns formatting function for a field.
2368

2369
  @type fdef: L{objects.QueryFieldDefinition}
2370
  @type override: dict
2371
  @param override: Dictionary for overriding field formatting functions,
2372
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2373
  @type unit: string
2374
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2375
  @rtype: tuple; (callable, bool)
2376
  @return: Returns the function to format a value (takes one parameter) and a
2377
    boolean for aligning the value on the right-hand side
2378

2379
  """
2380
  fmt = override.get(fdef.name, None)
2381
  if fmt is not None:
2382
    return fmt
2383

    
2384
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2385

    
2386
  if fdef.kind == constants.QFT_UNIT:
2387
    # Can't keep this information in the static dictionary
2388
    return (lambda value: utils.FormatUnit(value, unit), True)
2389

    
2390
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2391
  if fmt is not None:
2392
    return fmt
2393

    
2394
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2395

    
2396

    
2397
class _QueryColumnFormatter:
2398
  """Callable class for formatting fields of a query.
2399

2400
  """
2401
  def __init__(self, fn, status_fn, verbose):
2402
    """Initializes this class.
2403

2404
    @type fn: callable
2405
    @param fn: Formatting function
2406
    @type status_fn: callable
2407
    @param status_fn: Function to report fields' status
2408
    @type verbose: boolean
2409
    @param verbose: whether to use verbose field descriptions or not
2410

2411
    """
2412
    self._fn = fn
2413
    self._status_fn = status_fn
2414
    self._verbose = verbose
2415

    
2416
  def __call__(self, data):
2417
    """Returns a field's string representation.
2418

2419
    """
2420
    (status, value) = data
2421

    
2422
    # Report status
2423
    self._status_fn(status)
2424

    
2425
    if status == constants.RS_NORMAL:
2426
      return self._fn(value)
2427

    
2428
    assert value is None, \
2429
           "Found value %r for abnormal status %s" % (value, status)
2430

    
2431
    return FormatResultError(status, self._verbose)
2432

    
2433

    
2434
def FormatResultError(status, verbose):
2435
  """Formats result status other than L{constants.RS_NORMAL}.
2436

2437
  @param status: The result status
2438
  @type verbose: boolean
2439
  @param verbose: Whether to return the verbose text
2440
  @return: Text of result status
2441

2442
  """
2443
  assert status != constants.RS_NORMAL, \
2444
         "FormatResultError called with status equal to constants.RS_NORMAL"
2445
  try:
2446
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2447
  except KeyError:
2448
    raise NotImplementedError("Unknown status %s" % status)
2449
  else:
2450
    if verbose:
2451
      return verbose_text
2452
    return normal_text
2453

    
2454

    
2455
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2456
                      header=False, verbose=False):
2457
  """Formats data in L{objects.QueryResponse}.
2458

2459
  @type result: L{objects.QueryResponse}
2460
  @param result: result of query operation
2461
  @type unit: string
2462
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2463
    see L{utils.text.FormatUnit}
2464
  @type format_override: dict
2465
  @param format_override: Dictionary for overriding field formatting functions,
2466
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2467
  @type separator: string or None
2468
  @param separator: String used to separate fields
2469
  @type header: bool
2470
  @param header: Whether to output header row
2471
  @type verbose: boolean
2472
  @param verbose: whether to use verbose field descriptions or not
2473

2474
  """
2475
  if unit is None:
2476
    if separator:
2477
      unit = "m"
2478
    else:
2479
      unit = "h"
2480

    
2481
  if format_override is None:
2482
    format_override = {}
2483

    
2484
  stats = dict.fromkeys(constants.RS_ALL, 0)
2485

    
2486
  def _RecordStatus(status):
2487
    if status in stats:
2488
      stats[status] += 1
2489

    
2490
  columns = []
2491
  for fdef in result.fields:
2492
    assert fdef.title and fdef.name
2493
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2494
    columns.append(TableColumn(fdef.title,
2495
                               _QueryColumnFormatter(fn, _RecordStatus,
2496
                                                     verbose),
2497
                               align_right))
2498

    
2499
  table = FormatTable(result.data, columns, header, separator)
2500

    
2501
  # Collect statistics
2502
  assert len(stats) == len(constants.RS_ALL)
2503
  assert compat.all(count >= 0 for count in stats.values())
2504

    
2505
  # Determine overall status. If there was no data, unknown fields must be
2506
  # detected via the field definitions.
2507
  if (stats[constants.RS_UNKNOWN] or
2508
      (not result.data and _GetUnknownFields(result.fields))):
2509
    status = QR_UNKNOWN
2510
  elif compat.any(count > 0 for key, count in stats.items()
2511
                  if key != constants.RS_NORMAL):
2512
    status = QR_INCOMPLETE
2513
  else:
2514
    status = QR_NORMAL
2515

    
2516
  return (status, table)
2517

    
2518

    
2519
def _GetUnknownFields(fdefs):
2520
  """Returns list of unknown fields included in C{fdefs}.
2521

2522
  @type fdefs: list of L{objects.QueryFieldDefinition}
2523

2524
  """
2525
  return [fdef for fdef in fdefs
2526
          if fdef.kind == constants.QFT_UNKNOWN]
2527

    
2528

    
2529
def _WarnUnknownFields(fdefs):
2530
  """Prints a warning to stderr if a query included unknown fields.
2531

2532
  @type fdefs: list of L{objects.QueryFieldDefinition}
2533

2534
  """
2535
  unknown = _GetUnknownFields(fdefs)
2536
  if unknown:
2537
    ToStderr("Warning: Queried for unknown fields %s",
2538
             utils.CommaJoin(fdef.name for fdef in unknown))
2539
    return True
2540

    
2541
  return False
2542

    
2543

    
2544
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2545
                format_override=None, verbose=False):
2546
  """Generic implementation for listing all items of a resource.
2547

2548
  @param resource: One of L{constants.QR_OP_LUXI}
2549
  @type fields: list of strings
2550
  @param fields: List of fields to query for
2551
  @type names: list of strings
2552
  @param names: Names of items to query for
2553
  @type unit: string or None
2554
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2555
    None for automatic choice (human-readable for non-separator usage,
2556
    otherwise megabytes); this is a one-letter string
2557
  @type separator: string or None
2558
  @param separator: String used to separate fields
2559
  @type header: bool
2560
  @param header: Whether to show header row
2561
  @type format_override: dict
2562
  @param format_override: Dictionary for overriding field formatting functions,
2563
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2564
  @type verbose: boolean
2565
  @param verbose: whether to use verbose field descriptions or not
2566

2567
  """
2568
  if cl is None:
2569
    cl = GetClient()
2570

    
2571
  if not names:
2572
    names = None
2573

    
2574
  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
2575

    
2576
  found_unknown = _WarnUnknownFields(response.fields)
2577

    
2578
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2579
                                     header=header,
2580
                                     format_override=format_override,
2581
                                     verbose=verbose)
2582

    
2583
  for line in data:
2584
    ToStdout(line)
2585

    
2586
  assert ((found_unknown and status == QR_UNKNOWN) or
2587
          (not found_unknown and status != QR_UNKNOWN))
2588

    
2589
  if status == QR_UNKNOWN:
2590
    return constants.EXIT_UNKNOWN_FIELD
2591

    
2592
  # TODO: Should the list command fail if not all data could be collected?
2593
  return constants.EXIT_SUCCESS
2594

    
2595

    
2596
def GenericListFields(resource, fields, separator, header, cl=None):
2597
  """Generic implementation for listing fields for a resource.
2598

2599
  @param resource: One of L{constants.QR_OP_LUXI}
2600
  @type fields: list of strings
2601
  @param fields: List of fields to query for
2602
  @type separator: string or None
2603
  @param separator: String used to separate fields
2604
  @type header: bool
2605
  @param header: Whether to show header row
2606

2607
  """
2608
  if cl is None:
2609
    cl = GetClient()
2610

    
2611
  if not fields:
2612
    fields = None
2613

    
2614
  response = cl.QueryFields(resource, fields)
2615

    
2616
  found_unknown = _WarnUnknownFields(response.fields)
2617

    
2618
  columns = [
2619
    TableColumn("Name", str, False),
2620
    TableColumn("Title", str, False),
2621
    TableColumn("Description", str, False),
2622
    ]
2623

    
2624
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2625

    
2626
  for line in FormatTable(rows, columns, header, separator):
2627
    ToStdout(line)
2628

    
2629
  if found_unknown:
2630
    return constants.EXIT_UNKNOWN_FIELD
2631

    
2632
  return constants.EXIT_SUCCESS
2633

    
2634

    
2635
class TableColumn:
2636
  """Describes a column for L{FormatTable}.
2637

2638
  """
2639
  def __init__(self, title, fn, align_right):
2640
    """Initializes this class.
2641

2642
    @type title: string
2643
    @param title: Column title
2644
    @type fn: callable
2645
    @param fn: Formatting function
2646
    @type align_right: bool
2647
    @param align_right: Whether to align values on the right-hand side
2648

2649
    """
2650
    self.title = title
2651
    self.format = fn
2652
    self.align_right = align_right
2653

    
2654

    
2655
def _GetColFormatString(width, align_right):
2656
  """Returns the format string for a field.
2657

2658
  """
2659
  if align_right:
2660
    sign = ""
2661
  else:
2662
    sign = "-"
2663

    
2664
  return "%%%s%ss" % (sign, width)
2665

    
2666

    
2667
def FormatTable(rows, columns, header, separator):
2668
  """Formats data as a table.
2669

2670
  @type rows: list of lists
2671
  @param rows: Row data, one list per row
2672
  @type columns: list of L{TableColumn}
2673
  @param columns: Column descriptions
2674
  @type header: bool
2675
  @param header: Whether to show header row
2676
  @type separator: string or None
2677
  @param separator: String used to separate columns
2678

2679
  """
2680
  if header:
2681
    data = [[col.title for col in columns]]
2682
    colwidth = [len(col.title) for col in columns]
2683
  else:
2684
    data = []
2685
    colwidth = [0 for _ in columns]
2686

    
2687
  # Format row data
2688
  for row in rows:
2689
    assert len(row) == len(columns)
2690

    
2691
    formatted = [col.format(value) for value, col in zip(row, columns)]
2692

    
2693
    if separator is None:
2694
      # Update column widths
2695
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2696
        # Modifying a list's items while iterating is fine
2697
        colwidth[idx] = max(oldwidth, len(value))
2698

    
2699
    data.append(formatted)
2700

    
2701
  if separator is not None:
2702
    # Return early if a separator is used
2703
    return [separator.join(row) for row in data]
2704

    
2705
  if columns and not columns[-1].align_right:
2706
    # Avoid unnecessary spaces at end of line
2707
    colwidth[-1] = 0
2708

    
2709
  # Build format string
2710
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2711
                  for col, width in zip(columns, colwidth)])
2712

    
2713
  return [fmt % tuple(row) for row in data]
2714

    
2715

    
2716
def FormatTimestamp(ts):
2717
  """Formats a given timestamp.
2718

2719
  @type ts: timestamp
2720
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2721

2722
  @rtype: string
2723
  @return: a string with the formatted timestamp
2724

2725
  """
2726
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2727
    return '?'
2728
  sec, usec = ts
2729
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2730

    
2731

    
2732
def ParseTimespec(value):
2733
  """Parse a time specification.
2734

2735
  The following suffixes will be recognized:
2736

2737
    - s: seconds
2738
    - m: minutes
2739
    - h: hours
2740
    - d: days
2741
    - w: weeks
2742

2743
  Without any suffix, the value will be taken to be in seconds.
2744

2745
  """
2746
  value = str(value)
2747
  if not value:
2748
    raise errors.OpPrereqError("Empty time specification passed")
2749
  suffix_map = {
2750
    's': 1,
2751
    'm': 60,
2752
    'h': 3600,
2753
    'd': 86400,
2754
    'w': 604800,
2755
    }
2756
  if value[-1] not in suffix_map:
2757
    try:
2758
      value = int(value)
2759
    except (TypeError, ValueError):
2760
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2761
  else:
2762
    multiplier = suffix_map[value[-1]]
2763
    value = value[:-1]
2764
    if not value: # no data left after stripping the suffix
2765
      raise errors.OpPrereqError("Invalid time specification (only"
2766
                                 " suffix passed)")
2767
    try:
2768
      value = int(value) * multiplier
2769
    except (TypeError, ValueError):
2770
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2771
  return value
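
# Illustrative examples:
#   ParseTimespec("30") -> 30       (no suffix: seconds)
#   ParseTimespec("2h") -> 7200
#   ParseTimespec("1w") -> 604800
# An empty value or a bare suffix ("h") raises OpPrereqError.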
2772

    
2773

    
2774
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2775
                   filter_master=False):
2776
  """Returns the names of online nodes.
2777

2778
  This function will also log a warning on stderr with the names of
2779
  the offline nodes that are skipped.
2780

2781
  @param nodes: if not empty, use only this subset of nodes (minus the
2782
      offline ones)
2783
  @param cl: if not None, luxi client to use
2784
  @type nowarn: boolean
2785
  @param nowarn: by default, this function will output a note with the
2786
      offline nodes that are skipped; if this parameter is True the
2787
      note is not displayed
2788
  @type secondary_ips: boolean
2789
  @param secondary_ips: if True, return the secondary IPs instead of the
2790
      names, useful for doing network traffic over the replication interface
2791
      (if any)
2792
  @type filter_master: boolean
2793
  @param filter_master: if True, do not return the master node in the list
2794
      (useful in coordination with secondary_ips where we cannot check our
2795
      node name against the list)
2796

2797
  """
2798
  if cl is None:
2799
    cl = GetClient()
2800

    
2801
  if secondary_ips:
2802
    name_idx = 2
2803
  else:
2804
    name_idx = 0
2805

    
2806
  if filter_master:
2807
    master_node = cl.QueryConfigValues(["master_node"])[0]
2808
    filter_fn = lambda x: x != master_node
2809
  else:
2810
    filter_fn = lambda _: True
2811

    
2812
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2813
                         use_locking=False)
2814
  offline = [row[0] for row in result if row[1]]
2815
  if offline and not nowarn:
2816
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2817
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2818

    
2819

    
2820
def _ToStream(stream, txt, *args):
2821
  """Write a message to a stream, bypassing the logging system
2822

2823
  @type stream: file object
2824
  @param stream: the file to which we should write
2825
  @type txt: str
2826
  @param txt: the message
2827

2828
  """
2829
  if args:
2830
    args = tuple(args)
2831
    stream.write(txt % args)
2832
  else:
2833
    stream.write(txt)
2834
  stream.write('\n')
2835
  stream.flush()
2836

    
2837

    
2838
def ToStdout(txt, *args):
2839
  """Write a message to stdout only, bypassing the logging system
2840

2841
  This is just a wrapper over _ToStream.
2842

2843
  @type txt: str
2844
  @param txt: the message
2845

2846
  """
2847
  _ToStream(sys.stdout, txt, *args)
2848

    
2849

    
2850
def ToStderr(txt, *args):
2851
  """Write a message to stderr only, bypassing the logging system
2852

2853
  This is just a wrapper over _ToStream.
2854

2855
  @type txt: str
2856
  @param txt: the message
2857

2858
  """
2859
  _ToStream(sys.stderr, txt, *args)
2860

    
2861

    
2862
class JobExecutor(object):
2863
  """Class which manages the submission and execution of multiple jobs.
2864

2865
  Note that instances of this class should not be reused between
2866
  GetResults() calls.
2867

2868
  """
2869
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2870
    self.queue = []
2871
    if cl is None:
2872
      cl = GetClient()
2873
    self.cl = cl
2874
    self.verbose = verbose
2875
    self.jobs = []
2876
    self.opts = opts
2877
    self.feedback_fn = feedback_fn
2878

    
2879
  def QueueJob(self, name, *ops):
2880
    """Record a job for later submit.
2881

2882
    @type name: string
2883
    @param name: a description of the job, will be used when reporting results
2884
    """
2885
    SetGenericOpcodeOpts(ops, self.opts)
2886
    self.queue.append((name, ops))
2887

    
2888
  def SubmitPending(self, each=False):
2889
    """Submit all pending jobs.
2890

2891
    """
2892
    if each:
2893
      results = []
2894
      for row in self.queue:
2895
        # SubmitJob will remove the success status, but raise an exception if
2896
        # the submission fails, so we'll notice that anyway.
2897
        results.append([True, self.cl.SubmitJob(row[1])])
2898
    else:
2899
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2900
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2901
                                                            self.queue)):
2902
      self.jobs.append((idx, status, data, name))
2903

    
2904
  def _ChooseJob(self):
2905
    """Choose a non-waiting/queued job to poll next.
2906

2907
    """
2908
    assert self.jobs, "_ChooseJob called with empty job list"
2909

    
2910
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2911
    assert result
2912

    
2913
    for job_data, status in zip(self.jobs, result):
2914
      if (isinstance(status, list) and status and
2915
          status[0] in (constants.JOB_STATUS_QUEUED,
2916
                        constants.JOB_STATUS_WAITLOCK,
2917
                        constants.JOB_STATUS_CANCELING)):
2918
        # job is still present and waiting
2919
        continue
2920
      # good candidate found (either running job or lost job)
2921
      self.jobs.remove(job_data)
2922
      return job_data
2923

    
2924
    # no job found
2925
    return self.jobs.pop(0)
2926

    
2927
  def GetResults(self):
2928
    """Wait for and return the results of all jobs.
2929

2930
    @rtype: list
2931
    @return: list of tuples (success, job results), in the same order
2932
        as the submitted jobs; if a job has failed, instead of the result
2933
        there will be the error message
2934

2935
    """
2936
    if not self.jobs:
2937
      self.SubmitPending()
2938
    results = []
2939
    if self.verbose:
2940
      ok_jobs = [row[2] for row in self.jobs if row[1]]
2941
      if ok_jobs:
2942
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2943

    
2944
    # first, remove any non-submitted jobs
2945
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2946
    for idx, _, jid, name in failures:
2947
      ToStderr("Failed to submit job for %s: %s", name, jid)
2948
      results.append((idx, False, jid))
2949

    
2950
    while self.jobs:
2951
      (idx, _, jid, name) = self._ChooseJob()
2952
      ToStdout("Waiting for job %s for %s...", jid, name)
2953
      try:
2954
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2955
        success = True
2956
      except errors.JobLost, err:
2957
        _, job_result = FormatError(err)
2958
        ToStderr("Job %s for %s has been archived, cannot check its result",
2959
                 jid, name)
2960
        success = False
2961
      except (errors.GenericError, luxi.ProtocolError), err:
2962
        _, job_result = FormatError(err)
2963
        success = False
2964
        # the error message will always be shown, verbose or not
2965
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2966

    
2967
      results.append((idx, success, job_result))
2968

    
2969
    # sort based on the index, then drop it
2970
    results.sort()
2971
    results = [i[1:] for i in results]
2972

    
2973
    return results
2974

    
2975
  def WaitOrShow(self, wait):
2976
    """Wait for job results or only print the job IDs.
2977

2978
    @type wait: boolean
2979
    @param wait: whether to wait or not
2980

2981
    """
2982
    if wait:
2983
      return self.GetResults()
2984
    else:
2985
      if not self.jobs:
2986
        self.SubmitPending()
2987
      for _, status, result, name in self.jobs:
2988
        if status:
2989
          ToStdout("%s: %s", result, name)
2990
        else:
2991
          ToStderr("Failure for %s: %s", name, result)
2992
      return [row[1:3] for row in self.jobs]
2993

    
2994

    
2995
def FormatParameterDict(buf, param_dict, actual, level=1):
2996
  """Formats a parameter dictionary.
2997

2998
  @type buf: L{StringIO}
2999
  @param buf: the buffer into which to write
3000
  @type param_dict: dict
3001
  @param param_dict: the explicitly-set (own) parameters
3002
  @type actual: dict
3003
  @param actual: the current parameter set (including defaults)
3004
  @param level: Level of indent
3005

3006
  """
3007
  indent = "  " * level
3008
  for key in sorted(actual):
3009
    val = param_dict.get(key, "default (%s)" % actual[key])
3010
    buf.write("%s- %s: %s\n" % (indent, key, val))
3011

    
3012

    
3013
def ConfirmOperation(names, list_type, text, extra=""):
3014
  """Ask the user to confirm an operation on a list of list_type.
3015

3016
  This function is used to request confirmation for doing an operation
3017
  on a given list of list_type.
3018

3019
  @type names: list
3020
  @param names: the list of names that we display when
3021
      we ask for confirmation
3022
  @type list_type: str
3023
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3024
  @type text: str
3025
  @param text: the operation that the user should confirm
3026
  @rtype: boolean
3027
  @return: True or False depending on user's confirmation.
3028

3029
  """
3030
  count = len(names)
3031
  msg = ("The %s will operate on %d %s.\n%s"
3032
         "Do you want to continue?" % (text, count, list_type, extra))
3033
  affected = (("\nAffected %s:\n" % list_type) +
3034
              "\n".join(["  %s" % name for name in names]))
3035

    
3036
  choices = [("y", True, "Yes, execute the %s" % text),
3037
             ("n", False, "No, abort the %s" % text)]
3038

    
3039
  if count > 20:
3040
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3041
    question = msg
3042
  else:
3043
    question = msg + affected
3044

    
3045
  choice = AskUser(question, choices)
3046
  if choice == "v":
3047
    choices.pop(1)
3048
    choice = AskUser(msg + affected, choices)
3049
  return choice