#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
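
# Illustrative example (editor's note, not part of the original module): for
# a node-level tag command invoked with args=["node1.example.com", "tag1"]
# and opts.tag_type set to constants.TAG_NODE, the call above returns
# (constants.TAG_NODE, "node1.example.com") and leaves ["tag1"] in args.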


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
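
# Illustrative note (editor's note, not in the original source): an option
# declared with type="unit" passes its value through utils.ParseUnit, which,
# assuming Ganeti's usual convention of working in mebibytes, turns a value
# such as "4G" into 4096; unparseable values raise OptionValueError above.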


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key: val, key: val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
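
# Illustrative example (editor's note, not in the original source):
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus")
# returns {"memory": "512", "auto_balance": False, "vcpus": None},
# following the NO_PREFIX/UN_PREFIX rules documented above.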


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
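
# Illustrative example (editor's note, not in the original source): a value
# such as "sda:size=10G,mode=rw" parses to ("sda", {"size": "10G",
# "mode": "rw"}), while "no_link0" parses to ("link0", False), i.e. a
# request to remove that parameter group.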


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
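
# Illustrative sketch (editor's note, not in the original source) of how the
# custom types registered above are used when declaring an option; the name
# "EXAMPLE_OPT" is hypothetical, the real option definitions follow below:
#   EXAMPLE_OPT = cli_option("--example", dest="example", type="keyval",
#                            default={}, help="key=value parameters")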


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
                         default={}, dest="hvparams",
                         help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible, fall back"
                                     " to failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance MAC"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether the command argument should be"
                                    " treated as a filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
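
# Illustrative sketch (editor's note, not in the original source) of the
# shape of a "commands" dictionary entry consumed above; the names are
# hypothetical:
#   commands = {
#     "example": (ExampleFunc, ARGS_ONE_INSTANCE, [FORCE_OPT],
#                 "<instance>", "One-line description shown in the help"),
#     }
# i.e. a 5-tuple of (function, argument definition, option list, usage
# string, description), matching the unpacking in _ParseArgs.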


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
1390

    
1391

    
1392
def SplitNodeOption(value):
1393
  """Splits the value of a --node option.
1394

1395
  """
1396
  if value and ':' in value:
1397
    return value.split(':', 1)
1398
  else:
1399
    return (value, None)
1400
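# Illustrative behaviour of SplitNodeOption (hypothetical node names):
#   SplitNodeOption("node1.example.com:node2.example.com")
#       -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)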

    
1401

    
1402
def CalculateOSNames(os_name, os_variants):
1403
  """Calculates all the names an OS can be called, according to its variants.
1404

1405
  @type os_name: string
1406
  @param os_name: base name of the os
1407
  @type os_variants: list or None
1408
  @param os_variants: list of supported variants
1409
  @rtype: list
1410
  @return: list of valid names
1411

1412
  """
1413
  if os_variants:
1414
    return ['%s+%s' % (os_name, v) for v in os_variants]
1415
  else:
1416
    return [os_name]
1417
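# Illustrative behaviour of CalculateOSNames (hypothetical OS name/variants):
#   CalculateOSNames("debootstrap", ["squeeze", "wheezy"])
#       -> ["debootstrap+squeeze", "debootstrap+wheezy"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]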

    
1418

    
1419
def ParseFields(selected, default):
1420
  """Parses the values of "--field"-like options.
1421

1422
  @type selected: string or None
1423
  @param selected: User-selected options
1424
  @type default: list
1425
  @param default: Default fields
1426

1427
  """
1428
  if selected is None:
1429
    return default
1430

    
1431
  if selected.startswith("+"):
1432
    return default + selected[1:].split(",")
1433

    
1434
  return selected.split(",")
1435
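# Illustrative behaviour of ParseFields (hypothetical field names):
#   ParseFields(None, ["name", "status"])      -> ["name", "status"]  (defaults)
#   ParseFields("+ctime", ["name", "status"])  -> ["name", "status", "ctime"]
#   ParseFields("name,os", ["name", "status"]) -> ["name", "os"]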

    
1436

    
1437
UsesRPC = rpc.RunWithRPC
1438

    
1439

    
1440
def AskUser(text, choices=None):
1441
  """Ask the user a question.
1442

1443
  @param text: the question to ask
1444

1445
  @param choices: list of tuples (input_char, return_value,
1446
      description); if not given, it will default to: [('y', True,
1447
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
1448
      note that the '?' char is reserved for help
1449

1450
  @return: one of the return values from the choices list; if input is
1451
      not possible (i.e. not running with a tty), we return the last
1452
      entry from the list
1453

1454
  """
1455
  if choices is None:
1456
    choices = [('y', True, 'Perform the operation'),
1457
               ('n', False, 'Do not perform the operation')]
1458
  if not choices or not isinstance(choices, list):
1459
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1460
  for entry in choices:
1461
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1462
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1463

    
1464
  answer = choices[-1][1]
1465
  new_text = []
1466
  for line in text.splitlines():
1467
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1468
  text = "\n".join(new_text)
1469
  try:
1470
    f = file("/dev/tty", "a+")
1471
  except IOError:
1472
    return answer
1473
  try:
1474
    chars = [entry[0] for entry in choices]
1475
    chars[-1] = "[%s]" % chars[-1]
1476
    chars.append('?')
1477
    maps = dict([(entry[0], entry[1]) for entry in choices])
1478
    while True:
1479
      f.write(text)
1480
      f.write('\n')
1481
      f.write("/".join(chars))
1482
      f.write(": ")
1483
      line = f.readline(2).strip().lower()
1484
      if line in maps:
1485
        answer = maps[line]
1486
        break
1487
      elif line == '?':
1488
        for entry in choices:
1489
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1490
        f.write("\n")
1491
        continue
1492
  finally:
1493
    f.close()
1494
  return answer
1495
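# Minimal usage sketch for AskUser (hypothetical prompt; with the default
# choices this is a yes/no question where 'n' is both the displayed default and
# the fallback answer when no tty is available):
#
#   if not AskUser("Continue with the operation?"):
#     return constants.EXIT_FAILURE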

    
1496

    
1497
class JobSubmittedException(Exception):
1498
  """Job was submitted, client should exit.
1499

1500
  This exception has one argument, the ID of the job that was
1501
  submitted. The handler should print this ID.
1502

1503
  This is not an error, just a structured way to exit from clients.
1504

1505
  """
1506

    
1507

    
1508
def SendJob(ops, cl=None):
1509
  """Function to submit an opcode without waiting for the results.
1510

1511
  @type ops: list
1512
  @param ops: list of opcodes
1513
  @type cl: luxi.Client
1514
  @param cl: the luxi client to use for communicating with the master;
1515
             if None, a new client will be created
1516

1517
  """
1518
  if cl is None:
1519
    cl = GetClient()
1520

    
1521
  job_id = cl.SubmitJob(ops)
1522

    
1523
  return job_id
1524

    
1525

    
1526
def GenericPollJob(job_id, cbs, report_cbs):
1527
  """Generic job-polling function.
1528

1529
  @type job_id: number
1530
  @param job_id: Job ID
1531
  @type cbs: Instance of L{JobPollCbBase}
1532
  @param cbs: Data callbacks
1533
  @type report_cbs: Instance of L{JobPollReportCbBase}
1534
  @param report_cbs: Reporting callbacks
1535

1536
  """
1537
  prev_job_info = None
1538
  prev_logmsg_serial = None
1539

    
1540
  status = None
1541

    
1542
  while True:
1543
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1544
                                      prev_logmsg_serial)
1545
    if not result:
1546
      # job not found, go away!
1547
      raise errors.JobLost("Job with id %s lost" % job_id)
1548

    
1549
    if result == constants.JOB_NOTCHANGED:
1550
      report_cbs.ReportNotChanged(job_id, status)
1551

    
1552
      # Wait again
1553
      continue
1554

    
1555
    # Split result, a tuple of (field values, log entries)
1556
    (job_info, log_entries) = result
1557
    (status, ) = job_info
1558

    
1559
    if log_entries:
1560
      for log_entry in log_entries:
1561
        (serial, timestamp, log_type, message) = log_entry
1562
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1563
                                    log_type, message)
1564
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1565

    
1566
    # TODO: Handle canceled and archived jobs
1567
    elif status in (constants.JOB_STATUS_SUCCESS,
1568
                    constants.JOB_STATUS_ERROR,
1569
                    constants.JOB_STATUS_CANCELING,
1570
                    constants.JOB_STATUS_CANCELED):
1571
      break
1572

    
1573
    prev_job_info = job_info
1574

    
1575
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1576
  if not jobs:
1577
    raise errors.JobLost("Job with id %s lost" % job_id)
1578

    
1579
  status, opstatus, result = jobs[0]
1580

    
1581
  if status == constants.JOB_STATUS_SUCCESS:
1582
    return result
1583

    
1584
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1585
    raise errors.OpExecError("Job was canceled")
1586

    
1587
  has_ok = False
1588
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1589
    if status == constants.OP_STATUS_SUCCESS:
1590
      has_ok = True
1591
    elif status == constants.OP_STATUS_ERROR:
1592
      errors.MaybeRaise(msg)
1593

    
1594
      if has_ok:
1595
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1596
                                 (idx, msg))
1597

    
1598
      raise errors.OpExecError(str(msg))
1599

    
1600
  # default failure mode
1601
  raise errors.OpExecError(result)
1602

    
1603

    
1604
class JobPollCbBase:
1605
  """Base class for L{GenericPollJob} callbacks.
1606

1607
  """
1608
  def __init__(self):
1609
    """Initializes this class.
1610

1611
    """
1612

    
1613
  def WaitForJobChangeOnce(self, job_id, fields,
1614
                           prev_job_info, prev_log_serial):
1615
    """Waits for changes on a job.
1616

1617
    """
1618
    raise NotImplementedError()
1619

    
1620
  def QueryJobs(self, job_ids, fields):
1621
    """Returns the selected fields for the selected job IDs.
1622

1623
    @type job_ids: list of numbers
1624
    @param job_ids: Job IDs
1625
    @type fields: list of strings
1626
    @param fields: Fields
1627

1628
    """
1629
    raise NotImplementedError()
1630

    
1631

    
1632
class JobPollReportCbBase:
1633
  """Base class for L{GenericPollJob} reporting callbacks.
1634

1635
  """
1636
  def __init__(self):
1637
    """Initializes this class.
1638

1639
    """
1640

    
1641
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1642
    """Handles a log message.
1643

1644
    """
1645
    raise NotImplementedError()
1646

    
1647
  def ReportNotChanged(self, job_id, status):
1648
    """Called for if a job hasn't changed in a while.
1649

1650
    @type job_id: number
1651
    @param job_id: Job ID
1652
    @type status: string or None
1653
    @param status: Job status if available
1654

1655
    """
1656
    raise NotImplementedError()
1657

    
1658

    
1659
class _LuxiJobPollCb(JobPollCbBase):
1660
  def __init__(self, cl):
1661
    """Initializes this class.
1662

1663
    """
1664
    JobPollCbBase.__init__(self)
1665
    self.cl = cl
1666

    
1667
  def WaitForJobChangeOnce(self, job_id, fields,
1668
                           prev_job_info, prev_log_serial):
1669
    """Waits for changes on a job.
1670

1671
    """
1672
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1673
                                        prev_job_info, prev_log_serial)
1674

    
1675
  def QueryJobs(self, job_ids, fields):
1676
    """Returns the selected fields for the selected job IDs.
1677

1678
    """
1679
    return self.cl.QueryJobs(job_ids, fields)
1680

    
1681

    
1682
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1683
  def __init__(self, feedback_fn):
1684
    """Initializes this class.
1685

1686
    """
1687
    JobPollReportCbBase.__init__(self)
1688

    
1689
    self.feedback_fn = feedback_fn
1690

    
1691
    assert callable(feedback_fn)
1692

    
1693
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1694
    """Handles a log message.
1695

1696
    """
1697
    self.feedback_fn((timestamp, log_type, log_msg))
1698

    
1699
  def ReportNotChanged(self, job_id, status):
1700
    """Called if a job hasn't changed in a while.
1701

1702
    """
1703
    # Ignore
1704

    
1705

    
1706
class StdioJobPollReportCb(JobPollReportCbBase):
1707
  def __init__(self):
1708
    """Initializes this class.
1709

1710
    """
1711
    JobPollReportCbBase.__init__(self)
1712

    
1713
    self.notified_queued = False
1714
    self.notified_waitlock = False
1715

    
1716
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1717
    """Handles a log message.
1718

1719
    """
1720
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1721
             FormatLogMessage(log_type, log_msg))
1722

    
1723
  def ReportNotChanged(self, job_id, status):
1724
    """Called if a job hasn't changed in a while.
1725

1726
    """
1727
    if status is None:
1728
      return
1729

    
1730
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1731
      ToStderr("Job %s is waiting in queue", job_id)
1732
      self.notified_queued = True
1733

    
1734
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1735
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1736
      self.notified_waitlock = True
1737

    
1738

    
1739
def FormatLogMessage(log_type, log_msg):
1740
  """Formats a job message according to its type.
1741

1742
  """
1743
  if log_type != constants.ELOG_MESSAGE:
1744
    log_msg = str(log_msg)
1745

    
1746
  return utils.SafeEncode(log_msg)
1747

    
1748

    
1749
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1750
  """Function to poll for the result of a job.
1751

1752
  @type job_id: job identifier
1753
  @param job_id: the job to poll for results
1754
  @type cl: luxi.Client
1755
  @param cl: the luxi client to use for communicating with the master;
1756
             if None, a new client will be created
1757

1758
  """
1759
  if cl is None:
1760
    cl = GetClient()
1761

    
1762
  if reporter is None:
1763
    if feedback_fn:
1764
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1765
    else:
1766
      reporter = StdioJobPollReportCb()
1767
  elif feedback_fn:
1768
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1769

    
1770
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1771

    
1772

    
1773
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1774
  """Legacy function to submit an opcode.
1775

1776
  This is just a simple wrapper over the construction of the processor
1777
  instance. It should be extended to better handle feedback and
1778
  interaction functions.
1779

1780
  """
1781
  if cl is None:
1782
    cl = GetClient()
1783

    
1784
  SetGenericOpcodeOpts([op], opts)
1785

    
1786
  job_id = SendJob([op], cl=cl)
1787

    
1788
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1789
                       reporter=reporter)
1790

    
1791
  return op_results[0]
1792
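# Minimal sketch of how the helpers above are typically combined, assuming the
# caller has already built an opcode instance "op":
#
#   cl = GetClient()
#   result = SubmitOpCode(op, cl=cl)      # SendJob + PollJob in one call
#
# or, to submit without waiting for the result:
#
#   job_id = SendJob([op], cl=cl)
#   ToStdout("Submitted job %s", job_id)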

    
1793

    
1794
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1795
  """Wrapper around SubmitOpCode or SendJob.
1796

1797
  This function will decide, based on the 'opts' parameter, whether to
1798
  submit and wait for the result of the opcode (and return it), or
1799
  whether to just send the job and print its identifier. It is used in
1800
  order to simplify the implementation of the '--submit' option.
1801

1802
  It will also process the opcodes if we're sending them via SendJob
1803
  (otherwise SubmitOpCode does it).
1804

1805
  """
1806
  if opts and opts.submit_only:
1807
    job = [op]
1808
    SetGenericOpcodeOpts(job, opts)
1809
    job_id = SendJob(job, cl=cl)
1810
    raise JobSubmittedException(job_id)
1811
  else:
1812
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1813

    
1814

    
1815
def SetGenericOpcodeOpts(opcode_list, options):
1816
  """Processor for generic options.
1817

1818
  This function updates the given opcodes based on generic command
1819
  line options (like debug, dry-run, etc.).
1820

1821
  @param opcode_list: list of opcodes
1822
  @param options: command line options or None
1823
  @return: None (in-place modification)
1824

1825
  """
1826
  if not options:
1827
    return
1828
  for op in opcode_list:
1829
    op.debug_level = options.debug
1830
    if hasattr(options, "dry_run"):
1831
      op.dry_run = options.dry_run
1832
    if getattr(options, "priority", None) is not None:
1833
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1834
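# Illustrative effect of SetGenericOpcodeOpts (hypothetical options object):
# with options.debug == 2, options.dry_run == True and no priority set, the
# call SetGenericOpcodeOpts([op], options) sets op.debug_level = 2 and
# op.dry_run = True on the (single) opcode in the list.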

    
1835

    
1836
def GetClient():
1837
  # TODO: Cache object?
1838
  try:
1839
    client = luxi.Client()
1840
  except luxi.NoMasterError:
1841
    ss = ssconf.SimpleStore()
1842

    
1843
    # Try to read ssconf file
1844
    try:
1845
      ss.GetMasterNode()
1846
    except errors.ConfigurationError:
1847
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1848
                                 " not part of a cluster")
1849

    
1850
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1851
    if master != myself:
1852
      raise errors.OpPrereqError("This is not the master node, please connect"
1853
                                 " to node '%s' and rerun the command" %
1854
                                 master)
1855
    raise
1856
  return client
1857

    
1858

    
1859
def FormatError(err):
1860
  """Return a formatted error message for a given error.
1861

1862
  This function takes an exception instance and returns a tuple
1863
  consisting of two values: first, the recommended exit code, and
1864
  second, a string describing the error message (not
1865
  newline-terminated).
1866

1867
  """
1868
  retcode = 1
1869
  obuf = StringIO()
1870
  msg = str(err)
1871
  if isinstance(err, errors.ConfigurationError):
1872
    txt = "Corrupt configuration file: %s" % msg
1873
    logging.error(txt)
1874
    obuf.write(txt + "\n")
1875
    obuf.write("Aborting.")
1876
    retcode = 2
1877
  elif isinstance(err, errors.HooksAbort):
1878
    obuf.write("Failure: hooks execution failed:\n")
1879
    for node, script, out in err.args[0]:
1880
      if out:
1881
        obuf.write("  node: %s, script: %s, output: %s\n" %
1882
                   (node, script, out))
1883
      else:
1884
        obuf.write("  node: %s, script: %s (no output)\n" %
1885
                   (node, script))
1886
  elif isinstance(err, errors.HooksFailure):
1887
    obuf.write("Failure: hooks general failure: %s" % msg)
1888
  elif isinstance(err, errors.ResolverError):
1889
    this_host = netutils.Hostname.GetSysName()
1890
    if err.args[0] == this_host:
1891
      msg = "Failure: can't resolve my own hostname ('%s')"
1892
    else:
1893
      msg = "Failure: can't resolve hostname '%s'"
1894
    obuf.write(msg % err.args[0])
1895
  elif isinstance(err, errors.OpPrereqError):
1896
    if len(err.args) == 2:
1897
      obuf.write("Failure: prerequisites not met for this"
1898
               " operation:\nerror type: %s, error details:\n%s" %
1899
                 (err.args[1], err.args[0]))
1900
    else:
1901
      obuf.write("Failure: prerequisites not met for this"
1902
                 " operation:\n%s" % msg)
1903
  elif isinstance(err, errors.OpExecError):
1904
    obuf.write("Failure: command execution error:\n%s" % msg)
1905
  elif isinstance(err, errors.TagError):
1906
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1907
  elif isinstance(err, errors.JobQueueDrainError):
1908
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1909
               " accept new requests\n")
1910
  elif isinstance(err, errors.JobQueueFull):
1911
    obuf.write("Failure: the job queue is full and doesn't accept new"
1912
               " job submissions until old jobs are archived\n")
1913
  elif isinstance(err, errors.TypeEnforcementError):
1914
    obuf.write("Parameter Error: %s" % msg)
1915
  elif isinstance(err, errors.ParameterError):
1916
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1917
  elif isinstance(err, luxi.NoMasterError):
1918
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1919
               " and listening for connections?")
1920
  elif isinstance(err, luxi.TimeoutError):
1921
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1922
               " been submitted and will continue to run even if the call"
1923
               " timed out. Useful commands in this situation are \"gnt-job"
1924
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1925
    obuf.write(msg)
1926
  elif isinstance(err, luxi.PermissionError):
1927
    obuf.write("It seems you don't have permissions to connect to the"
1928
               " master daemon.\nPlease retry as a different user.")
1929
  elif isinstance(err, luxi.ProtocolError):
1930
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1931
               "%s" % msg)
1932
  elif isinstance(err, errors.JobLost):
1933
    obuf.write("Error checking job status: %s" % msg)
1934
  elif isinstance(err, errors.QueryFilterParseError):
1935
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
1936
    obuf.write("\n".join(err.GetDetails()))
1937
  elif isinstance(err, errors.GenericError):
1938
    obuf.write("Unhandled Ganeti error: %s" % msg)
1939
  elif isinstance(err, JobSubmittedException):
1940
    obuf.write("JobID: %s\n" % err.args[0])
1941
    retcode = 0
1942
  else:
1943
    obuf.write("Unhandled exception: %s" % msg)
1944
  return retcode, obuf.getvalue().rstrip('\n')
1945

    
1946

    
1947
def GenericMain(commands, override=None, aliases=None):
1948
  """Generic main function for all the gnt-* commands.
1949

1950
  Arguments:
1951
    - commands: a dictionary with a special structure, see the design doc
1952
                for command line handling.
1953
    - override: if not None, we expect a dictionary with keys that will
1954
                override command line options; this can be used to pass
1955
                options from the scripts to generic functions
1956
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1957

1958
  """
1959
  # save the program name and the entire command line for later logging
1960
  if sys.argv:
1961
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1962
    if len(sys.argv) >= 2:
1963
      binary += " " + sys.argv[1]
1964
      old_cmdline = " ".join(sys.argv[2:])
1965
    else:
1966
      old_cmdline = ""
1967
  else:
1968
    binary = "<unknown program>"
1969
    old_cmdline = ""
1970

    
1971
  if aliases is None:
1972
    aliases = {}
1973

    
1974
  try:
1975
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1976
  except errors.ParameterError, err:
1977
    result, err_msg = FormatError(err)
1978
    ToStderr(err_msg)
1979
    return 1
1980

    
1981
  if func is None: # parse error
1982
    return 1
1983

    
1984
  if override is not None:
1985
    for key, val in override.iteritems():
1986
      setattr(options, key, val)
1987

    
1988
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
1989
                     stderr_logging=True)
1990

    
1991
  if old_cmdline:
1992
    logging.info("run with arguments '%s'", old_cmdline)
1993
  else:
1994
    logging.info("run with no arguments")
1995

    
1996
  try:
1997
    result = func(options, args)
1998
  except (errors.GenericError, luxi.ProtocolError,
1999
          JobSubmittedException), err:
2000
    result, err_msg = FormatError(err)
2001
    logging.exception("Error during command processing")
2002
    ToStderr(err_msg)
2003
  except KeyboardInterrupt:
2004
    result = constants.EXIT_FAILURE
2005
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2006
             " might have been submitted and"
2007
             " will continue to run in the background.")
2008
  except IOError, err:
2009
    if err.errno == errno.EPIPE:
2010
      # our terminal went away, we'll exit
2011
      sys.exit(constants.EXIT_FAILURE)
2012
    else:
2013
      raise
2014

    
2015
  return result
2016

    
2017

    
2018
def ParseNicOption(optvalue):
2019
  """Parses the value of the --net option(s).
2020

2021
  """
2022
  try:
2023
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2024
  except (TypeError, ValueError), err:
2025
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2026

    
2027
  nics = [{}] * nic_max
2028
  for nidx, ndict in optvalue:
2029
    nidx = int(nidx)
2030

    
2031
    if not isinstance(ndict, dict):
2032
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2033
                                 " got %s" % (nidx, ndict))
2034

    
2035
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2036

    
2037
    nics[nidx] = ndict
2038

    
2039
  return nics
2040
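# Illustrative input/output for ParseNicOption (hypothetical values; the option
# parser delivers a list of (index, dict) pairs):
#
#   ParseNicOption([("0", {"mode": "bridged"}), ("2", {})])
#       -> [{"mode": "bridged"}, {}, {}]
#
# i.e. the result is sized by the highest index and unspecified slots stay
# empty dictionaries.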

    
2041

    
2042
def GenericInstanceCreate(mode, opts, args):
2043
  """Add an instance to the cluster via either creation or import.
2044

2045
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2046
  @param opts: the command line options selected by the user
2047
  @type args: list
2048
  @param args: should contain only one element, the new instance name
2049
  @rtype: int
2050
  @return: the desired exit code
2051

2052
  """
2053
  instance = args[0]
2054

    
2055
  (pnode, snode) = SplitNodeOption(opts.node)
2056

    
2057
  hypervisor = None
2058
  hvparams = {}
2059
  if opts.hypervisor:
2060
    hypervisor, hvparams = opts.hypervisor
2061

    
2062
  if opts.nics:
2063
    nics = ParseNicOption(opts.nics)
2064
  elif opts.no_nics:
2065
    # no nics
2066
    nics = []
2067
  elif mode == constants.INSTANCE_CREATE:
2068
    # default of one nic, all auto
2069
    nics = [{}]
2070
  else:
2071
    # mode == import
2072
    nics = []
2073

    
2074
  if opts.disk_template == constants.DT_DISKLESS:
2075
    if opts.disks or opts.sd_size is not None:
2076
      raise errors.OpPrereqError("Diskless instance but disk"
2077
                                 " information passed")
2078
    disks = []
2079
  else:
2080
    if (not opts.disks and not opts.sd_size
2081
        and mode == constants.INSTANCE_CREATE):
2082
      raise errors.OpPrereqError("No disk information specified")
2083
    if opts.disks and opts.sd_size is not None:
2084
      raise errors.OpPrereqError("Please use either the '--disk' or"
2085
                                 " '-s' option")
2086
    if opts.sd_size is not None:
2087
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2088

    
2089
    if opts.disks:
2090
      try:
2091
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2092
      except ValueError, err:
2093
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2094
      disks = [{}] * disk_max
2095
    else:
2096
      disks = []
2097
    for didx, ddict in opts.disks:
2098
      didx = int(didx)
2099
      if not isinstance(ddict, dict):
2100
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2101
        raise errors.OpPrereqError(msg)
2102
      elif constants.IDISK_SIZE in ddict:
2103
        if constants.IDISK_ADOPT in ddict:
2104
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2105
                                     " (disk %d)" % didx)
2106
        try:
2107
          ddict[constants.IDISK_SIZE] = \
2108
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2109
        except ValueError, err:
2110
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2111
                                     (didx, err))
2112
      elif constants.IDISK_ADOPT in ddict:
2113
        if mode == constants.INSTANCE_IMPORT:
2114
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2115
                                     " import")
2116
        ddict[constants.IDISK_SIZE] = 0
2117
      else:
2118
        raise errors.OpPrereqError("Missing size or adoption source for"
2119
                                   " disk %d" % didx)
2120
      disks[didx] = ddict
2121

    
2122
  if opts.tags is not None:
2123
    tags = opts.tags.split(",")
2124
  else:
2125
    tags = []
2126

    
2127
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2128
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2129

    
2130
  if mode == constants.INSTANCE_CREATE:
2131
    start = opts.start
2132
    os_type = opts.os
2133
    force_variant = opts.force_variant
2134
    src_node = None
2135
    src_path = None
2136
    no_install = opts.no_install
2137
    identify_defaults = False
2138
  elif mode == constants.INSTANCE_IMPORT:
2139
    start = False
2140
    os_type = None
2141
    force_variant = False
2142
    src_node = opts.src_node
2143
    src_path = opts.src_dir
2144
    no_install = None
2145
    identify_defaults = opts.identify_defaults
2146
  else:
2147
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2148

    
2149
  op = opcodes.OpInstanceCreate(instance_name=instance,
2150
                                disks=disks,
2151
                                disk_template=opts.disk_template,
2152
                                nics=nics,
2153
                                pnode=pnode, snode=snode,
2154
                                ip_check=opts.ip_check,
2155
                                name_check=opts.name_check,
2156
                                wait_for_sync=opts.wait_for_sync,
2157
                                file_storage_dir=opts.file_storage_dir,
2158
                                file_driver=opts.file_driver,
2159
                                iallocator=opts.iallocator,
2160
                                hypervisor=hypervisor,
2161
                                hvparams=hvparams,
2162
                                beparams=opts.beparams,
2163
                                osparams=opts.osparams,
2164
                                mode=mode,
2165
                                start=start,
2166
                                os_type=os_type,
2167
                                force_variant=force_variant,
2168
                                src_node=src_node,
2169
                                src_path=src_path,
2170
                                tags=tags,
2171
                                no_install=no_install,
2172
                                identify_defaults=identify_defaults)
2173

    
2174
  SubmitOrSend(op, opts)
2175
  return 0
2176

    
2177

    
2178
class _RunWhileClusterStoppedHelper:
2179
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2180

2181
  """
2182
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2183
    """Initializes this class.
2184

2185
    @type feedback_fn: callable
2186
    @param feedback_fn: Feedback function
2187
    @type cluster_name: string
2188
    @param cluster_name: Cluster name
2189
    @type master_node: string
2190
    @param master_node: Master node name
2191
    @type online_nodes: list
2192
    @param online_nodes: List of names of online nodes
2193

2194
    """
2195
    self.feedback_fn = feedback_fn
2196
    self.cluster_name = cluster_name
2197
    self.master_node = master_node
2198
    self.online_nodes = online_nodes
2199

    
2200
    self.ssh = ssh.SshRunner(self.cluster_name)
2201

    
2202
    self.nonmaster_nodes = [name for name in online_nodes
2203
                            if name != master_node]
2204

    
2205
    assert self.master_node not in self.nonmaster_nodes
2206

    
2207
  def _RunCmd(self, node_name, cmd):
2208
    """Runs a command on the local or a remote machine.
2209

2210
    @type node_name: string
2211
    @param node_name: Machine name
2212
    @type cmd: list
2213
    @param cmd: Command
2214

2215
    """
2216
    if node_name is None or node_name == self.master_node:
2217
      # No need to use SSH
2218
      result = utils.RunCmd(cmd)
2219
    else:
2220
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2221

    
2222
    if result.failed:
2223
      errmsg = ["Failed to run command %s" % result.cmd]
2224
      if node_name:
2225
        errmsg.append("on node %s" % node_name)
2226
      errmsg.append(": exitcode %s and error %s" %
2227
                    (result.exit_code, result.output))
2228
      raise errors.OpExecError(" ".join(errmsg))
2229

    
2230
  def Call(self, fn, *args):
2231
    """Call function while all daemons are stopped.
2232

2233
    @type fn: callable
2234
    @param fn: Function to be called
2235

2236
    """
2237
    # Pause watcher by acquiring an exclusive lock on watcher state file
2238
    self.feedback_fn("Blocking watcher")
2239
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2240
    try:
2241
      # TODO: Currently, this just blocks. There's no timeout.
2242
      # TODO: Should it be a shared lock?
2243
      watcher_block.Exclusive(blocking=True)
2244

    
2245
      # Stop master daemons, so that no new jobs can come in and all running
2246
      # ones are finished
2247
      self.feedback_fn("Stopping master daemons")
2248
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2249
      try:
2250
        # Stop daemons on all nodes
2251
        for node_name in self.online_nodes:
2252
          self.feedback_fn("Stopping daemons on %s" % node_name)
2253
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2254

    
2255
        # All daemons are shut down now
2256
        try:
2257
          return fn(self, *args)
2258
        except Exception, err:
2259
          _, errmsg = FormatError(err)
2260
          logging.exception("Caught exception")
2261
          self.feedback_fn(errmsg)
2262
          raise
2263
      finally:
2264
        # Start cluster again, master node last
2265
        for node_name in self.nonmaster_nodes + [self.master_node]:
2266
          self.feedback_fn("Starting daemons on %s" % node_name)
2267
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2268
    finally:
2269
      # Resume watcher
2270
      watcher_block.Close()
2271

    
2272

    
2273
def RunWhileClusterStopped(feedback_fn, fn, *args):
2274
  """Calls a function while all cluster daemons are stopped.
2275

2276
  @type feedback_fn: callable
2277
  @param feedback_fn: Feedback function
2278
  @type fn: callable
2279
  @param fn: Function to be called when daemons are stopped
2280

2281
  """
2282
  feedback_fn("Gathering cluster information")
2283

    
2284
  # This ensures we're running on the master daemon
2285
  cl = GetClient()
2286

    
2287
  (cluster_name, master_node) = \
2288
    cl.QueryConfigValues(["cluster_name", "master_node"])
2289

    
2290
  online_nodes = GetOnlineNodes([], cl=cl)
2291

    
2292
  # Don't keep a reference to the client. The master daemon will go away.
2293
  del cl
2294

    
2295
  assert master_node in online_nodes
2296

    
2297
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2298
                                       online_nodes).Call(fn, *args)
2299

    
2300

    
2301
def GenerateTable(headers, fields, separator, data,
2302
                  numfields=None, unitfields=None,
2303
                  units=None):
2304
  """Prints a table with headers and different fields.
2305

2306
  @type headers: dict
2307
  @param headers: dictionary mapping field names to headers for
2308
      the table
2309
  @type fields: list
2310
  @param fields: the field names corresponding to each row in
2311
      the data field
2312
  @param separator: the separator to be used; if this is None,
2313
      the default 'smart' algorithm is used which computes optimal
2314
      field width, otherwise just the separator is used between
2315
      each field
2316
  @type data: list
2317
  @param data: a list of lists, each sublist being one row to be output
2318
  @type numfields: list
2319
  @param numfields: a list with the fields that hold numeric
2320
      values and thus should be right-aligned
2321
  @type unitfields: list
2322
  @param unitfields: a list with the fields that hold numeric
2323
      values that should be formatted with the units field
2324
  @type units: string or None
2325
  @param units: the units we should use for formatting, or None for
2326
      automatic choice (human-readable for non-separator usage, otherwise
2327
      megabytes); this is a one-letter string
2328

2329
  """
2330
  if units is None:
2331
    if separator:
2332
      units = "m"
2333
    else:
2334
      units = "h"
2335

    
2336
  if numfields is None:
2337
    numfields = []
2338
  if unitfields is None:
2339
    unitfields = []
2340

    
2341
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2342
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2343

    
2344
  format_fields = []
2345
  for field in fields:
2346
    if headers and field not in headers:
2347
      # TODO: better handle unknown fields (either revert to the old
2348
      # style of raising exception, or deal more intelligently with
2349
      # variable fields)
2350
      headers[field] = field
2351
    if separator is not None:
2352
      format_fields.append("%s")
2353
    elif numfields.Matches(field):
2354
      format_fields.append("%*s")
2355
    else:
2356
      format_fields.append("%-*s")
2357

    
2358
  if separator is None:
2359
    mlens = [0 for name in fields]
2360
    format_str = ' '.join(format_fields)
2361
  else:
2362
    format_str = separator.replace("%", "%%").join(format_fields)
2363

    
2364
  for row in data:
2365
    if row is None:
2366
      continue
2367
    for idx, val in enumerate(row):
2368
      if unitfields.Matches(fields[idx]):
2369
        try:
2370
          val = int(val)
2371
        except (TypeError, ValueError):
2372
          pass
2373
        else:
2374
          val = row[idx] = utils.FormatUnit(val, units)
2375
      val = row[idx] = str(val)
2376
      if separator is None:
2377
        mlens[idx] = max(mlens[idx], len(val))
2378

    
2379
  result = []
2380
  if headers:
2381
    args = []
2382
    for idx, name in enumerate(fields):
2383
      hdr = headers[name]
2384
      if separator is None:
2385
        mlens[idx] = max(mlens[idx], len(hdr))
2386
        args.append(mlens[idx])
2387
      args.append(hdr)
2388
    result.append(format_str % tuple(args))
2389

    
2390
  if separator is None:
2391
    assert len(mlens) == len(fields)
2392

    
2393
    if fields and not numfields.Matches(fields[-1]):
2394
      mlens[-1] = 0
2395

    
2396
  for line in data:
2397
    args = []
2398
    if line is None:
2399
      line = ['-' for _ in fields]
2400
    for idx in range(len(fields)):
2401
      if separator is None:
2402
        args.append(mlens[idx])
2403
      args.append(line[idx])
2404
    result.append(format_str % tuple(args))
2405

    
2406
  return result
2407

    
2408

    
2409
def _FormatBool(value):
2410
  """Formats a boolean value as a string.
2411

2412
  """
2413
  if value:
2414
    return "Y"
2415
  return "N"
2416

    
2417

    
2418
#: Default formatting for query results; (callback, align right)
2419
_DEFAULT_FORMAT_QUERY = {
2420
  constants.QFT_TEXT: (str, False),
2421
  constants.QFT_BOOL: (_FormatBool, False),
2422
  constants.QFT_NUMBER: (str, True),
2423
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2424
  constants.QFT_OTHER: (str, False),
2425
  constants.QFT_UNKNOWN: (str, False),
2426
  }
2427

    
2428

    
2429
def _GetColumnFormatter(fdef, override, unit):
2430
  """Returns formatting function for a field.
2431

2432
  @type fdef: L{objects.QueryFieldDefinition}
2433
  @type override: dict
2434
  @param override: Dictionary for overriding field formatting functions,
2435
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2436
  @type unit: string
2437
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2438
  @rtype: tuple; (callable, bool)
2439
  @return: Returns the function to format a value (takes one parameter) and a
2440
    boolean for aligning the value on the right-hand side
2441

2442
  """
2443
  fmt = override.get(fdef.name, None)
2444
  if fmt is not None:
2445
    return fmt
2446

    
2447
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2448

    
2449
  if fdef.kind == constants.QFT_UNIT:
2450
    # Can't keep this information in the static dictionary
2451
    return (lambda value: utils.FormatUnit(value, unit), True)
2452

    
2453
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2454
  if fmt is not None:
2455
    return fmt
2456

    
2457
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2458

    
2459

    
2460
class _QueryColumnFormatter:
2461
  """Callable class for formatting fields of a query.
2462

2463
  """
2464
  def __init__(self, fn, status_fn, verbose):
2465
    """Initializes this class.
2466

2467
    @type fn: callable
2468
    @param fn: Formatting function
2469
    @type status_fn: callable
2470
    @param status_fn: Function to report fields' status
2471
    @type verbose: boolean
2472
    @param verbose: whether to use verbose field descriptions or not
2473

2474
    """
2475
    self._fn = fn
2476
    self._status_fn = status_fn
2477
    self._verbose = verbose
2478

    
2479
  def __call__(self, data):
2480
    """Returns a field's string representation.
2481

2482
    """
2483
    (status, value) = data
2484

    
2485
    # Report status
2486
    self._status_fn(status)
2487

    
2488
    if status == constants.RS_NORMAL:
2489
      return self._fn(value)
2490

    
2491
    assert value is None, \
2492
           "Found value %r for abnormal status %s" % (value, status)
2493

    
2494
    return FormatResultError(status, self._verbose)
2495

    
2496

    
2497
def FormatResultError(status, verbose):
2498
  """Formats result status other than L{constants.RS_NORMAL}.
2499

2500
  @param status: The result status
2501
  @type verbose: boolean
2502
  @param verbose: Whether to return the verbose text
2503
  @return: Text of result status
2504

2505
  """
2506
  assert status != constants.RS_NORMAL, \
2507
         "FormatResultError called with status equal to constants.RS_NORMAL"
2508
  try:
2509
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2510
  except KeyError:
2511
    raise NotImplementedError("Unknown status %s" % status)
2512
  else:
2513
    if verbose:
2514
      return verbose_text
2515
    return normal_text
2516

    
2517

    
2518
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2519
                      header=False, verbose=False):
2520
  """Formats data in L{objects.QueryResponse}.
2521

2522
  @type result: L{objects.QueryResponse}
2523
  @param result: result of query operation
2524
  @type unit: string
2525
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2526
    see L{utils.text.FormatUnit}
2527
  @type format_override: dict
2528
  @param format_override: Dictionary for overriding field formatting functions,
2529
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2530
  @type separator: string or None
2531
  @param separator: String used to separate fields
2532
  @type header: bool
2533
  @param header: Whether to output header row
2534
  @type verbose: boolean
2535
  @param verbose: whether to use verbose field descriptions or not
2536

2537
  """
2538
  if unit is None:
2539
    if separator:
2540
      unit = "m"
2541
    else:
2542
      unit = "h"
2543

    
2544
  if format_override is None:
2545
    format_override = {}
2546

    
2547
  stats = dict.fromkeys(constants.RS_ALL, 0)
2548

    
2549
  def _RecordStatus(status):
2550
    if status in stats:
2551
      stats[status] += 1
2552

    
2553
  columns = []
2554
  for fdef in result.fields:
2555
    assert fdef.title and fdef.name
2556
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2557
    columns.append(TableColumn(fdef.title,
2558
                               _QueryColumnFormatter(fn, _RecordStatus,
2559
                                                     verbose),
2560
                               align_right))
2561

    
2562
  table = FormatTable(result.data, columns, header, separator)
2563

    
2564
  # Collect statistics
2565
  assert len(stats) == len(constants.RS_ALL)
2566
  assert compat.all(count >= 0 for count in stats.values())
2567

    
2568
  # Determine overall status. If there was no data, unknown fields must be
2569
  # detected via the field definitions.
2570
  if (stats[constants.RS_UNKNOWN] or
2571
      (not result.data and _GetUnknownFields(result.fields))):
2572
    status = QR_UNKNOWN
2573
  elif compat.any(count > 0 for key, count in stats.items()
2574
                  if key != constants.RS_NORMAL):
2575
    status = QR_INCOMPLETE
2576
  else:
2577
    status = QR_NORMAL
2578

    
2579
  return (status, table)
2580

    
2581

    
2582
def _GetUnknownFields(fdefs):
2583
  """Returns list of unknown fields included in C{fdefs}.
2584

2585
  @type fdefs: list of L{objects.QueryFieldDefinition}
2586

2587
  """
2588
  return [fdef for fdef in fdefs
2589
          if fdef.kind == constants.QFT_UNKNOWN]
2590

    
2591

    
2592
def _WarnUnknownFields(fdefs):
2593
  """Prints a warning to stderr if a query included unknown fields.
2594

2595
  @type fdefs: list of L{objects.QueryFieldDefinition}
2596

2597
  """
2598
  unknown = _GetUnknownFields(fdefs)
2599
  if unknown:
2600
    ToStderr("Warning: Queried for unknown fields %s",
2601
             utils.CommaJoin(fdef.name for fdef in unknown))
2602
    return True
2603

    
2604
  return False
2605

    
2606

    
2607
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2608
                format_override=None, verbose=False, force_filter=False):
2609
  """Generic implementation for listing all items of a resource.
2610

2611
  @param resource: One of L{constants.QR_VIA_LUXI}
2612
  @type fields: list of strings
2613
  @param fields: List of fields to query for
2614
  @type names: list of strings
2615
  @param names: Names of items to query for
2616
  @type unit: string or None
2617
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2618
    None for automatic choice (human-readable for non-separator usage,
2619
    otherwise megabytes); this is a one-letter string
2620
  @type separator: string or None
2621
  @param separator: String used to separate fields
2622
  @type header: bool
2623
  @param header: Whether to show header row
2624
  @type force_filter: bool
2625
  @param force_filter: Whether to always treat names as filter
2626
  @type format_override: dict
2627
  @param format_override: Dictionary for overriding field formatting functions,
2628
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2629
  @type verbose: boolean
2630
  @param verbose: whether to use verbose field descriptions or not
2631

2632
  """
2633
  if cl is None:
2634
    cl = GetClient()
2635

    
2636
  if not names:
2637
    names = None
2638

    
2639
  if (force_filter or
2640
      (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
2641
    try:
2642
      (filter_text, ) = names
2643
    except ValueError:
2644
      raise errors.OpPrereqError("Exactly one argument must be given as a"
2645
                                 " filter")
2646

    
2647
    logging.debug("Parsing '%s' as filter", filter_text)
2648
    filter_ = qlang.ParseFilter(filter_text)
2649
  else:
2650
    filter_ = qlang.MakeSimpleFilter("name", names)
2651

    
2652
  response = cl.Query(resource, fields, filter_)
2653

    
2654
  found_unknown = _WarnUnknownFields(response.fields)
2655

    
2656
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2657
                                     header=header,
2658
                                     format_override=format_override,
2659
                                     verbose=verbose)
2660

    
2661
  for line in data:
2662
    ToStdout(line)
2663

    
2664
  assert ((found_unknown and status == QR_UNKNOWN) or
2665
          (not found_unknown and status != QR_UNKNOWN))
2666

    
2667
  if status == QR_UNKNOWN:
2668
    return constants.EXIT_UNKNOWN_FIELD
2669

    
2670
  # TODO: Should the list command fail if not all data could be collected?
2671
  return constants.EXIT_SUCCESS
2672

    
2673

    
2674
def GenericListFields(resource, fields, separator, header, cl=None):
2675
  """Generic implementation for listing fields for a resource.
2676

2677
  @param resource: One of L{constants.QR_VIA_LUXI}
2678
  @type fields: list of strings
2679
  @param fields: List of fields to query for
2680
  @type separator: string or None
2681
  @param separator: String used to separate fields
2682
  @type header: bool
2683
  @param header: Whether to show header row
2684

2685
  """
2686
  if cl is None:
2687
    cl = GetClient()
2688

    
2689
  if not fields:
2690
    fields = None
2691

    
2692
  response = cl.QueryFields(resource, fields)
2693

    
2694
  found_unknown = _WarnUnknownFields(response.fields)
2695

    
2696
  columns = [
2697
    TableColumn("Name", str, False),
2698
    TableColumn("Title", str, False),
2699
    TableColumn("Description", str, False),
2700
    ]
2701

    
2702
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2703

    
2704
  for line in FormatTable(rows, columns, header, separator):
2705
    ToStdout(line)
2706

    
2707
  if found_unknown:
2708
    return constants.EXIT_UNKNOWN_FIELD
2709

    
2710
  return constants.EXIT_SUCCESS
2711

    
2712

    
2713
class TableColumn:
2714
  """Describes a column for L{FormatTable}.
2715

2716
  """
2717
  def __init__(self, title, fn, align_right):
2718
    """Initializes this class.
2719

2720
    @type title: string
2721
    @param title: Column title
2722
    @type fn: callable
2723
    @param fn: Formatting function
2724
    @type align_right: bool
2725
    @param align_right: Whether to align values on the right-hand side
2726

2727
    """
2728
    self.title = title
2729
    self.format = fn
2730
    self.align_right = align_right
2731

    
2732

    
2733
def _GetColFormatString(width, align_right):
2734
  """Returns the format string for a field.
2735

2736
  """
2737
  if align_right:
2738
    sign = ""
2739
  else:
2740
    sign = "-"
2741

    
2742
  return "%%%s%ss" % (sign, width)
2743
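# Example: _GetColFormatString(10, False) returns "%-10s" (left-aligned) and
# _GetColFormatString(10, True) returns "%10s" (right-aligned).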

    
2744

    
2745
def FormatTable(rows, columns, header, separator):
2746
  """Formats data as a table.
2747

2748
  @type rows: list of lists
2749
  @param rows: Row data, one list per row
2750
  @type columns: list of L{TableColumn}
2751
  @param columns: Column descriptions
2752
  @type header: bool
2753
  @param header: Whether to show header row
2754
  @type separator: string or None
2755
  @param separator: String used to separate columns
2756

2757
  """
2758
  if header:
2759
    data = [[col.title for col in columns]]
2760
    colwidth = [len(col.title) for col in columns]
2761
  else:
2762
    data = []
2763
    colwidth = [0 for _ in columns]
2764

    
2765
  # Format row data
2766
  for row in rows:
2767
    assert len(row) == len(columns)
2768

    
2769
    formatted = [col.format(value) for value, col in zip(row, columns)]
2770

    
2771
    if separator is None:
2772
      # Update column widths
2773
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2774
        # Modifying a list's items while iterating is fine
2775
        colwidth[idx] = max(oldwidth, len(value))
2776

    
2777
    data.append(formatted)
2778

    
2779
  if separator is not None:
2780
    # Return early if a separator is used
2781
    return [separator.join(row) for row in data]
2782

    
2783
  if columns and not columns[-1].align_right:
2784
    # Avoid unnecessary spaces at end of line
2785
    colwidth[-1] = 0
2786

    
2787
  # Build format string
2788
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2789
                  for col, width in zip(columns, colwidth)])
2790

    
2791
  return [fmt % tuple(row) for row in data]
2792
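# Minimal sketch for FormatTable (hypothetical rows and columns); with
# separator=None the column widths are computed automatically:
#
#   cols = [TableColumn("Name", str, False), TableColumn("Size", str, True)]
#   for line in FormatTable([["disk0", "128"], ["disk1", "1024"]], cols,
#                           True, None):
#     ToStdout(line)
#
# which prints a header row followed by the two aligned data rows.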

    
2793

    
2794
def FormatTimestamp(ts):
2795
  """Formats a given timestamp.
2796

2797
  @type ts: timestamp
2798
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2799

2800
  @rtype: string
2801
  @return: a string with the formatted timestamp
2802

2803
  """
2804
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2805
    return '?'
2806
  sec, usec = ts
2807
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2808
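# Illustrative behaviour: FormatTimestamp((1325376000, 123456)) returns a
# string such as "2012-01-01 00:00:00.123456" (the date/time part depends on
# the local timezone), while anything that is not a two-element sequence
# yields "?".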

    
2809

    
2810
def ParseTimespec(value):
2811
  """Parse a time specification.
2812

2813
  The following suffixes will be recognized:
2814

2815
    - s: seconds
2816
    - m: minutes
2817
    - h: hours
2818
    - d: days
2819
    - w: weeks
2820

2821
  Without any suffix, the value will be taken to be in seconds.
2822

2823
  """
2824
  value = str(value)
2825
  if not value:
2826
    raise errors.OpPrereqError("Empty time specification passed")
2827
  suffix_map = {
2828
    's': 1,
2829
    'm': 60,
2830
    'h': 3600,
2831
    'd': 86400,
2832
    'w': 604800,
2833
    }
2834
  if value[-1] not in suffix_map:
2835
    try:
2836
      value = int(value)
2837
    except (TypeError, ValueError):
2838
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2839
  else:
2840
    multiplier = suffix_map[value[-1]]
2841
    value = value[:-1]
2842
    if not value: # no data left after stripping the suffix
2843
      raise errors.OpPrereqError("Invalid time specification (only"
2844
                                 " suffix passed)")
2845
    try:
2846
      value = int(value) * multiplier
2847
    except (TypeError, ValueError):
2848
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2849
  return value
2850
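# Illustrative behaviour of ParseTimespec:
#   ParseTimespec("30") -> 30            (no suffix: seconds)
#   ParseTimespec("2h") -> 7200
#   ParseTimespec("1w") -> 604800
#   ParseTimespec("")   raises OpPrereqError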

    
2851

    
2852
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2853
                   filter_master=False):
2854
  """Returns the names of online nodes.
2855

2856
  This function will also log a warning on stderr with the names of
2857
  the offline nodes that are skipped.
2858

2859
  @param nodes: if not empty, use only this subset of nodes (minus the
2860
      offline ones)
2861
  @param cl: if not None, luxi client to use
2862
  @type nowarn: boolean
2863
  @param nowarn: by default, this function will output a note with the
2864
      offline nodes that are skipped; if this parameter is True the
2865
      note is not displayed
2866
  @type secondary_ips: boolean
2867
  @param secondary_ips: if True, return the secondary IPs instead of the
2868
      names, useful for doing network traffic over the replication interface
2869
      (if any)
2870
  @type filter_master: boolean
2871
  @param filter_master: if True, do not return the master node in the list
2872
      (useful in coordination with secondary_ips where we cannot check our
2873
      node name against the list)
2874

2875
  """
2876
  if cl is None:
2877
    cl = GetClient()
2878

    
2879
  if secondary_ips:
2880
    name_idx = 2
2881
  else:
2882
    name_idx = 0
2883

    
2884
  if filter_master:
2885
    master_node = cl.QueryConfigValues(["master_node"])[0]
2886
    filter_fn = lambda x: x != master_node
2887
  else:
2888
    filter_fn = lambda _: True
2889

    
2890
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2891
                         use_locking=False)
2892
  offline = [row[0] for row in result if row[1]]
2893
  if offline and not nowarn:
2894
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2895
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2896

    
2897

    
2898
def _ToStream(stream, txt, *args):
2899
  """Write a message to a stream, bypassing the logging system
2900

2901
  @type stream: file object
2902
  @param stream: the file to which we should write
2903
  @type txt: str
2904
  @param txt: the message
2905

2906
  """
2907
  try:
2908
    if args:
2909
      args = tuple(args)
2910
      stream.write(txt % args)
2911
    else:
2912
      stream.write(txt)
2913
    stream.write('\n')
2914
    stream.flush()
2915
  except IOError, err:
2916
    if err.errno == errno.EPIPE:
2917
      # our terminal went away, we'll exit
2918
      sys.exit(constants.EXIT_FAILURE)
2919
    else:
2920
      raise
2921

    
2922

    
2923
def ToStdout(txt, *args):
2924
  """Write a message to stdout only, bypassing the logging system
2925

2926
  This is just a wrapper over _ToStream.
2927

2928
  @type txt: str
2929
  @param txt: the message
2930

2931
  """
2932
  _ToStream(sys.stdout, txt, *args)
2933

    
2934

    
2935
def ToStderr(txt, *args):
2936
  """Write a message to stderr only, bypassing the logging system
2937

2938
  This is just a wrapper over _ToStream.
2939

2940
  @type txt: str
2941
  @param txt: the message
2942

2943
  """
2944
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, used in the messages printed
        while submitting and waiting for the job

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # Unlike SubmitManyJobs, SubmitJob does not return a success status;
        # it raises an exception if the submission fails, so failures are
        # still noticed.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
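
# Example usage of JobExecutor (illustrative sketch only): queue one job per
# target, submit the whole batch, then wait for and inspect the results.  The
# "job_list" variable (holding (name, opcode) pairs) and "opts" are
# hypothetical and only meant to show the calling pattern.
#
#   jex = JobExecutor(opts=opts)
#   for (name, op) in job_list:
#     jex.QueueJob(name, op)
#   jex.SubmitPending()
#   for (success, result) in jex.GetResults():
#     if not success:
#       ToStderr("A job failed: %s", result)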


def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the parameters that are explicitly set (without defaults)
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
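
# Example usage (illustrative sketch only): ask for confirmation before a bulk
# operation and bail out if the user declines.  The instance names and the
# "removal" operation text are made up for the example.
#
#   if not ConfirmOperation(["instance1", "instance2"], "instances", "removal"):
#     return constants.EXIT_FAILURE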