root / lib / cli.py @ a57981c5

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
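
# Example (hypothetical combination, for illustration only): a command taking
# exactly one node name followed by any number of instance names could declare
# its positional arguments as [ArgNode(min=1, max=1), ArgInstance()];
# _CheckArguments() below enforces the min/max counts declared here.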


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
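
# For instance, with opts.tag_type == constants.TAG_NODE and
# args == ["node1.example.com", "mytag"], the call returns
# (constants.TAG_NODE, "node1.example.com") and leaves ["mytag"] in args
# (the node and tag names are illustrative).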


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
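
# The tags source file is read one tag per line, e.g. a file containing
# "web\nfrontend\n" adds the tags "web" and "frontend", while "--from -"
# reads the tag list from stdin instead (the tag names are illustrative).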


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  the cases of tag objects (cluster, node group, node, instance). The
  opts argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  the cases of tag objects (cluster, node group, node, instance). The
  opts argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  the cases of tag objects (cluster, node group, node, instance). The
  opts argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
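
# check_unit backs options declared with type="unit"; utils.ParseUnit accepts
# plain numbers (taken as MiB, cf. OS_SIZE_OPT below) as well as suffixed
# values such as "512m" or "4g", and an unparseable value is reported back to
# optparse as an OptionValueError (the example values are illustrative).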


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
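
# Examples (illustrative values):
#   _SplitKeyVal("-B", "memory=512,no_auto_balance")
#     -> {"memory": "512", "auto_balance": False}
#   a bare "-key" element yields {"key": None}, a bare "key" element
#   yields {"key": True}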


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
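
# Examples (illustrative values): a value of "0:mode=ro,size=1G" parses to
# ("0", {"mode": "ro", "size": "1G"}), "no_link0" parses to ("link0", False)
# and "-link0" parses to ("link0", None).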


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
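
# Because CliOption registers the extra types above, the option definitions in
# this module can simply declare e.g. type="unit" or type="keyval" and optparse
# will run the matching check_* converter on the raw command line value.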


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
                         default={}, dest="hvparams",
                         help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible, fall back"
                                     " to failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the MAC prefix for the instance"
                            " NICs, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names, which will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether the command argument should be"
                                    " treated as a filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# Common options for creating instances. The "add" and "import" commands then
# add their own specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
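
# The commands dictionary handed to _ParseArgs() below maps a command name to
# a 5-tuple of (handler function, argument definition, option list, usage
# string, description); e.g. a hypothetical entry
# "list": (ListTags, ARGS_NONE, [NOHDR_OPT], "", "Lists the tags")
# would wire the options above to the ListTags handler.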
1239

    
1240

    
1241
def _ParseArgs(argv, commands, aliases):
1242
  """Parser for the command line arguments.
1243

1244
  This function parses the arguments and returns the function which
1245
  must be executed together with its (modified) arguments.
1246

1247
  @param argv: the command line
1248
  @param commands: dictionary with special contents, see the design
1249
      doc for cmdline handling
1250
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
1251

1252
  """
1253
  if len(argv) == 0:
1254
    binary = "<command>"
1255
  else:
1256
    binary = argv[0].split("/")[-1]
1257

    
1258
  if len(argv) > 1 and argv[1] == "--version":
1259
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1260
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
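
# Illustrative sketch (not part of the original module): _CheckArguments only
# inspects the ``min`` and ``max`` attributes of each argument definition, so
# a hypothetical stand-in object is enough to show the counting logic:
#
#   >>> class _FakeArg:               # hypothetical helper, for illustration
#   ...   def __init__(self, mn, mx):
#   ...     self.min = mn
#   ...     self.max = mx
#   >>> _CheckArguments("add", [_FakeArg(1, 1)], ["inst1"])      # exactly one
#   True
#   >>> _CheckArguments("add", [_FakeArg(1, None)], ["a", "b"])  # one or more
#   True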
1389

    
1390

    
1391
def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
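
# Illustrative examples (not part of the original module). Note the historical
# asymmetry of the return value: a list when a secondary node is given, a
# tuple otherwise:
#
#   >>> SplitNodeOption("node1.example.com:node2.example.com")
#   ['node1.example.com', 'node2.example.com']
#   >>> SplitNodeOption("node1.example.com")
#   ('node1.example.com', None)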
1399

    
1400

    
1401
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
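
# Illustrative examples (not part of the original module; the OS and variant
# names are made up):
#
#   >>> CalculateOSNames("debootstrap", ["default", "minimal"])
#   ['debootstrap+default', 'debootstrap+minimal']
#   >>> CalculateOSNames("debootstrap", None)
#   ['debootstrap']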
1416

    
1417

    
1418
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
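
# Illustrative examples (not part of the original module; field names are
# arbitrary). A leading "+" extends the default field list instead of
# replacing it:
#
#   >>> ParseFields(None, ["name", "status"])
#   ['name', 'status']
#   >>> ParseFields("+oper_ram", ["name", "status"])
#   ['name', 'status', 'oper_ram']
#   >>> ParseFields("name,os", ["name", "status"])
#   ['name', 'os']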
1434

    
1435

    
1436
UsesRPC = rpc.RunWithRPC
1437

    
1438

    
1439
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
1454
  if choices is None:
1455
    choices = [('y', True, 'Perform the operation'),
1456
               ('n', False, 'Do not perform the operation')]
1457
  if not choices or not isinstance(choices, list):
1458
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1459
  for entry in choices:
1460
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1461
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1462

    
1463
  answer = choices[-1][1]
1464
  new_text = []
1465
  for line in text.splitlines():
1466
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1467
  text = "\n".join(new_text)
1468
  try:
1469
    f = file("/dev/tty", "a+")
1470
  except IOError:
1471
    return answer
1472
  try:
1473
    chars = [entry[0] for entry in choices]
1474
    chars[-1] = "[%s]" % chars[-1]
1475
    chars.append('?')
1476
    maps = dict([(entry[0], entry[1]) for entry in choices])
1477
    while True:
1478
      f.write(text)
1479
      f.write('\n')
1480
      f.write("/".join(chars))
1481
      f.write(": ")
1482
      line = f.readline(2).strip().lower()
1483
      if line in maps:
1484
        answer = maps[line]
1485
        break
1486
      elif line == '?':
1487
        for entry in choices:
1488
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1489
        f.write("\n")
1490
        continue
1491
  finally:
1492
    f.close()
1493
  return answer
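
# Illustrative usage sketch (not part of the original module; the prompt text
# and choice tuples are made up). Each choice is (input_char, return_value,
# description) and '?' is reserved for the built-in help:
#
#   choices = [("y", True, "Remove the instance"),
#              ("n", False, "Keep the instance")]
#   if not AskUser("Remove instance and all its disks?", choices):
#     return constants.EXIT_FAILURE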
1494

    
1495

    
1496
class JobSubmittedException(Exception):
1497
  """Job was submitted, client should exit.
1498

1499
  This exception has one argument, the ID of the job that was
1500
  submitted. The handler should print this ID.
1501

1502
  This is not an error, just a structured way to exit from clients.
1503

1504
  """
1505

    
1506

    
1507
def SendJob(ops, cl=None):
1508
  """Function to submit an opcode without waiting for the results.
1509

1510
  @type ops: list
1511
  @param ops: list of opcodes
1512
  @type cl: luxi.Client
1513
  @param cl: the luxi client to use for communicating with the master;
1514
             if None, a new client will be created
1515

1516
  """
1517
  if cl is None:
1518
    cl = GetClient()
1519

    
1520
  job_id = cl.SubmitJob(ops)
1521

    
1522
  return job_id
1523

    
1524

    
1525
def GenericPollJob(job_id, cbs, report_cbs):
1526
  """Generic job-polling function.
1527

1528
  @type job_id: number
1529
  @param job_id: Job ID
1530
  @type cbs: Instance of L{JobPollCbBase}
1531
  @param cbs: Data callbacks
1532
  @type report_cbs: Instance of L{JobPollReportCbBase}
1533
  @param report_cbs: Reporting callbacks
1534

1535
  """
1536
  prev_job_info = None
1537
  prev_logmsg_serial = None
1538

    
1539
  status = None
1540

    
1541
  while True:
1542
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1543
                                      prev_logmsg_serial)
1544
    if not result:
1545
      # job not found, go away!
1546
      raise errors.JobLost("Job with id %s lost" % job_id)
1547

    
1548
    if result == constants.JOB_NOTCHANGED:
1549
      report_cbs.ReportNotChanged(job_id, status)
1550

    
1551
      # Wait again
1552
      continue
1553

    
1554
    # Split result, a tuple of (field values, log entries)
1555
    (job_info, log_entries) = result
1556
    (status, ) = job_info
1557

    
1558
    if log_entries:
1559
      for log_entry in log_entries:
1560
        (serial, timestamp, log_type, message) = log_entry
1561
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1562
                                    log_type, message)
1563
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1564

    
1565
    # TODO: Handle canceled and archived jobs
1566
    elif status in (constants.JOB_STATUS_SUCCESS,
1567
                    constants.JOB_STATUS_ERROR,
1568
                    constants.JOB_STATUS_CANCELING,
1569
                    constants.JOB_STATUS_CANCELED):
1570
      break
1571

    
1572
    prev_job_info = job_info
1573

    
1574
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1575
  if not jobs:
1576
    raise errors.JobLost("Job with id %s lost" % job_id)
1577

    
1578
  status, opstatus, result = jobs[0]
1579

    
1580
  if status == constants.JOB_STATUS_SUCCESS:
1581
    return result
1582

    
1583
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1584
    raise errors.OpExecError("Job was canceled")
1585

    
1586
  has_ok = False
1587
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1588
    if status == constants.OP_STATUS_SUCCESS:
1589
      has_ok = True
1590
    elif status == constants.OP_STATUS_ERROR:
1591
      errors.MaybeRaise(msg)
1592

    
1593
      if has_ok:
1594
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1595
                                 (idx, msg))
1596

    
1597
      raise errors.OpExecError(str(msg))
1598

    
1599
  # default failure mode
1600
  raise errors.OpExecError(result)
1601

    
1602

    
1603
class JobPollCbBase:
1604
  """Base class for L{GenericPollJob} callbacks.
1605

1606
  """
1607
  def __init__(self):
1608
    """Initializes this class.
1609

1610
    """
1611

    
1612
  def WaitForJobChangeOnce(self, job_id, fields,
1613
                           prev_job_info, prev_log_serial):
1614
    """Waits for changes on a job.
1615

1616
    """
1617
    raise NotImplementedError()
1618

    
1619
  def QueryJobs(self, job_ids, fields):
1620
    """Returns the selected fields for the selected job IDs.
1621

1622
    @type job_ids: list of numbers
1623
    @param job_ids: Job IDs
1624
    @type fields: list of strings
1625
    @param fields: Fields
1626

1627
    """
1628
    raise NotImplementedError()
1629

    
1630

    
1631
class JobPollReportCbBase:
1632
  """Base class for L{GenericPollJob} reporting callbacks.
1633

1634
  """
1635
  def __init__(self):
1636
    """Initializes this class.
1637

1638
    """
1639

    
1640
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1641
    """Handles a log message.
1642

1643
    """
1644
    raise NotImplementedError()
1645

    
1646
  def ReportNotChanged(self, job_id, status):
1647
    """Called for if a job hasn't changed in a while.
1648

1649
    @type job_id: number
1650
    @param job_id: Job ID
1651
    @type status: string or None
1652
    @param status: Job status if available
1653

1654
    """
1655
    raise NotImplementedError()
1656

    
1657

    
1658
class _LuxiJobPollCb(JobPollCbBase):
1659
  def __init__(self, cl):
1660
    """Initializes this class.
1661

1662
    """
1663
    JobPollCbBase.__init__(self)
1664
    self.cl = cl
1665

    
1666
  def WaitForJobChangeOnce(self, job_id, fields,
1667
                           prev_job_info, prev_log_serial):
1668
    """Waits for changes on a job.
1669

1670
    """
1671
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1672
                                        prev_job_info, prev_log_serial)
1673

    
1674
  def QueryJobs(self, job_ids, fields):
1675
    """Returns the selected fields for the selected job IDs.
1676

1677
    """
1678
    return self.cl.QueryJobs(job_ids, fields)
1679

    
1680

    
1681
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1682
  def __init__(self, feedback_fn):
1683
    """Initializes this class.
1684

1685
    """
1686
    JobPollReportCbBase.__init__(self)
1687

    
1688
    self.feedback_fn = feedback_fn
1689

    
1690
    assert callable(feedback_fn)
1691

    
1692
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1693
    """Handles a log message.
1694

1695
    """
1696
    self.feedback_fn((timestamp, log_type, log_msg))
1697

    
1698
  def ReportNotChanged(self, job_id, status):
1699
    """Called if a job hasn't changed in a while.
1700

1701
    """
1702
    # Ignore
1703

    
1704

    
1705
class StdioJobPollReportCb(JobPollReportCbBase):
1706
  def __init__(self):
1707
    """Initializes this class.
1708

1709
    """
1710
    JobPollReportCbBase.__init__(self)
1711

    
1712
    self.notified_queued = False
1713
    self.notified_waitlock = False
1714

    
1715
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1716
    """Handles a log message.
1717

1718
    """
1719
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1720
             FormatLogMessage(log_type, log_msg))
1721

    
1722
  def ReportNotChanged(self, job_id, status):
1723
    """Called if a job hasn't changed in a while.
1724

1725
    """
1726
    if status is None:
1727
      return
1728

    
1729
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1730
      ToStderr("Job %s is waiting in queue", job_id)
1731
      self.notified_queued = True
1732

    
1733
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1734
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1735
      self.notified_waitlock = True
1736

    
1737

    
1738
def FormatLogMessage(log_type, log_msg):
1739
  """Formats a job message according to its type.
1740

1741
  """
1742
  if log_type != constants.ELOG_MESSAGE:
1743
    log_msg = str(log_msg)
1744

    
1745
  return utils.SafeEncode(log_msg)
1746

    
1747

    
1748
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1749
  """Function to poll for the result of a job.
1750

1751
  @type job_id: number
1752
  @param job_id: the job to poll for results
1753
  @type cl: luxi.Client
1754
  @param cl: the luxi client to use for communicating with the master;
1755
             if None, a new client will be created
1756

1757
  """
1758
  if cl is None:
1759
    cl = GetClient()
1760

    
1761
  if reporter is None:
1762
    if feedback_fn:
1763
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1764
    else:
1765
      reporter = StdioJobPollReportCb()
1766
  elif feedback_fn:
1767
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1768

    
1769
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
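
# Illustrative usage sketch (not part of the original module): SendJob and
# PollJob are typically combined exactly as SubmitOpCode does below;
# 'op' stands for any opcodes.OpCode instance:
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)   # one result per opcode in the job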
1770

    
1771

    
1772
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1773
  """Legacy function to submit an opcode.
1774

1775
  This is just a simple wrapper over the construction of the processor
1776
  instance. It should be extended to better handle feedback and
1777
  interaction functions.
1778

1779
  """
1780
  if cl is None:
1781
    cl = GetClient()
1782

    
1783
  SetGenericOpcodeOpts([op], opts)
1784

    
1785
  job_id = SendJob([op], cl=cl)
1786

    
1787
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1788
                       reporter=reporter)
1789

    
1790
  return op_results[0]
1791

    
1792

    
1793
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1794
  """Wrapper around SubmitOpCode or SendJob.
1795

1796
  This function will decide, based on the 'opts' parameter, whether to
1797
  submit and wait for the result of the opcode (and return it), or
1798
  whether to just send the job and print its identifier. It is used in
1799
  order to simplify the implementation of the '--submit' option.
1800

1801
  It will also process the opcodes if we're sending them via SendJob
1802
  (otherwise SubmitOpCode does it).
1803

1804
  """
1805
  if opts and opts.submit_only:
1806
    job = [op]
1807
    SetGenericOpcodeOpts(job, opts)
1808
    job_id = SendJob(job, cl=cl)
1809
    raise JobSubmittedException(job_id)
1810
  else:
1811
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
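
# Illustrative usage sketch (not part of the original module): a typical
# gnt-* command handler builds an opcode and hands it over; with --submit,
# SubmitOrSend raises JobSubmittedException so the caller just prints the
# job ID, otherwise it waits for the job and returns its result:
#
#   def ExampleHandler(opts, args):        # hypothetical handler
#     op = ...                             # any opcodes.OpCode built from args
#     SubmitOrSend(op, opts)
#     return 0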
1812

    
1813

    
1814
def SetGenericOpcodeOpts(opcode_list, options):
1815
  """Processor for generic options.
1816

1817
  This function updates the given opcodes based on generic command
1818
  line options (like debug, dry-run, etc.).
1819

1820
  @param opcode_list: list of opcodes
1821
  @param options: command line options or None
1822
  @return: None (in-place modification)
1823

1824
  """
1825
  if not options:
1826
    return
1827
  for op in opcode_list:
1828
    op.debug_level = options.debug
1829
    if hasattr(options, "dry_run"):
1830
      op.dry_run = options.dry_run
1831
    if getattr(options, "priority", None) is not None:
1832
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1833

    
1834

    
1835
def GetClient():
1836
  # TODO: Cache object?
1837
  try:
1838
    client = luxi.Client()
1839
  except luxi.NoMasterError:
1840
    ss = ssconf.SimpleStore()
1841

    
1842
    # Try to read ssconf file
1843
    try:
1844
      ss.GetMasterNode()
1845
    except errors.ConfigurationError:
1846
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1847
                                 " not part of a cluster")
1848

    
1849
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1850
    if master != myself:
1851
      raise errors.OpPrereqError("This is not the master node, please connect"
1852
                                 " to node '%s' and rerun the command" %
1853
                                 master)
1854
    raise
1855
  return client
1856

    
1857

    
1858
def FormatError(err):
1859
  """Return a formatted error message for a given error.
1860

1861
  This function takes an exception instance and returns a tuple
1862
  consisting of two values: first, the recommended exit code, and
1863
  second, a string describing the error message (not
1864
  newline-terminated).
1865

1866
  """
1867
  retcode = 1
1868
  obuf = StringIO()
1869
  msg = str(err)
1870
  if isinstance(err, errors.ConfigurationError):
1871
    txt = "Corrupt configuration file: %s" % msg
1872
    logging.error(txt)
1873
    obuf.write(txt + "\n")
1874
    obuf.write("Aborting.")
1875
    retcode = 2
1876
  elif isinstance(err, errors.HooksAbort):
1877
    obuf.write("Failure: hooks execution failed:\n")
1878
    for node, script, out in err.args[0]:
1879
      if out:
1880
        obuf.write("  node: %s, script: %s, output: %s\n" %
1881
                   (node, script, out))
1882
      else:
1883
        obuf.write("  node: %s, script: %s (no output)\n" %
1884
                   (node, script))
1885
  elif isinstance(err, errors.HooksFailure):
1886
    obuf.write("Failure: hooks general failure: %s" % msg)
1887
  elif isinstance(err, errors.ResolverError):
1888
    this_host = netutils.Hostname.GetSysName()
1889
    if err.args[0] == this_host:
1890
      msg = "Failure: can't resolve my own hostname ('%s')"
1891
    else:
1892
      msg = "Failure: can't resolve hostname '%s'"
1893
    obuf.write(msg % err.args[0])
1894
  elif isinstance(err, errors.OpPrereqError):
1895
    if len(err.args) == 2:
1896
      obuf.write("Failure: prerequisites not met for this"
1897
               " operation:\nerror type: %s, error details:\n%s" %
1898
                 (err.args[1], err.args[0]))
1899
    else:
1900
      obuf.write("Failure: prerequisites not met for this"
1901
                 " operation:\n%s" % msg)
1902
  elif isinstance(err, errors.OpExecError):
1903
    obuf.write("Failure: command execution error:\n%s" % msg)
1904
  elif isinstance(err, errors.TagError):
1905
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1906
  elif isinstance(err, errors.JobQueueDrainError):
1907
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1908
               " accept new requests\n")
1909
  elif isinstance(err, errors.JobQueueFull):
1910
    obuf.write("Failure: the job queue is full and doesn't accept new"
1911
               " job submissions until old jobs are archived\n")
1912
  elif isinstance(err, errors.TypeEnforcementError):
1913
    obuf.write("Parameter Error: %s" % msg)
1914
  elif isinstance(err, errors.ParameterError):
1915
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1916
  elif isinstance(err, luxi.NoMasterError):
1917
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1918
               " and listening for connections?")
1919
  elif isinstance(err, luxi.TimeoutError):
1920
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1921
               " been submitted and will continue to run even if the call"
1922
               " timed out. Useful commands in this situation are \"gnt-job"
1923
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1924
    obuf.write(msg)
1925
  elif isinstance(err, luxi.PermissionError):
1926
    obuf.write("It seems you don't have permissions to connect to the"
1927
               " master daemon.\nPlease retry as a different user.")
1928
  elif isinstance(err, luxi.ProtocolError):
1929
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1930
               "%s" % msg)
1931
  elif isinstance(err, errors.JobLost):
1932
    obuf.write("Error checking job status: %s" % msg)
1933
  elif isinstance(err, errors.QueryFilterParseError):
1934
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
1935
    obuf.write("\n".join(err.GetDetails()))
1936
  elif isinstance(err, errors.GenericError):
1937
    obuf.write("Unhandled Ganeti error: %s" % msg)
1938
  elif isinstance(err, JobSubmittedException):
1939
    obuf.write("JobID: %s\n" % err.args[0])
1940
    retcode = 0
1941
  else:
1942
    obuf.write("Unhandled exception: %s" % msg)
1943
  return retcode, obuf.getvalue().rstrip('\n')
1944

    
1945

    
1946
def GenericMain(commands, override=None, aliases=None):
1947
  """Generic main function for all the gnt-* commands.
1948

1949
  Arguments:
1950
    - commands: a dictionary with a special structure, see the design doc
1951
                for command line handling.
1952
    - override: if not None, we expect a dictionary with keys that will
1953
                override command line options; this can be used to pass
1954
                options from the scripts to generic functions
1955
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1956

1957
  """
1958
  # save the program name and the entire command line for later logging
1959
  if sys.argv:
1960
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1961
    if len(sys.argv) >= 2:
1962
      binary += " " + sys.argv[1]
1963
      old_cmdline = " ".join(sys.argv[2:])
1964
    else:
1965
      old_cmdline = ""
1966
  else:
1967
    binary = "<unknown program>"
1968
    old_cmdline = ""
1969

    
1970
  if aliases is None:
1971
    aliases = {}
1972

    
1973
  try:
1974
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1975
  except errors.ParameterError, err:
1976
    result, err_msg = FormatError(err)
1977
    ToStderr(err_msg)
1978
    return 1
1979

    
1980
  if func is None: # parse error
1981
    return 1
1982

    
1983
  if override is not None:
1984
    for key, val in override.iteritems():
1985
      setattr(options, key, val)
1986

    
1987
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
1988
                     stderr_logging=True)
1989

    
1990
  if old_cmdline:
1991
    logging.info("run with arguments '%s'", old_cmdline)
1992
  else:
1993
    logging.info("run with no arguments")
1994

    
1995
  try:
1996
    result = func(options, args)
1997
  except (errors.GenericError, luxi.ProtocolError,
1998
          JobSubmittedException), err:
1999
    result, err_msg = FormatError(err)
2000
    logging.exception("Error during command processing")
2001
    ToStderr(err_msg)
2002
  except KeyboardInterrupt:
2003
    result = constants.EXIT_FAILURE
2004
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2005
             " might have been submitted and"
2006
             " will continue to run in the background.")
2007
  except IOError, err:
2008
    if err.errno == errno.EPIPE:
2009
      # our terminal went away, we'll exit
2010
      sys.exit(constants.EXIT_FAILURE)
2011
    else:
2012
      raise
2013

    
2014
  return result
2015

    
2016

    
2017
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
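
# Illustrative example (not part of the original module; the NIC parameter
# values are made up). The input mirrors what the --net option parser
# produces, i.e. a list of (index, parameter-dict) pairs; unspecified
# indices are left as empty dicts:
#
#   >>> ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#   [{'link': 'br0'}, {}, {'mode': 'routed'}]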
2039

    
2040

    
2041
def GenericInstanceCreate(mode, opts, args):
2042
  """Add an instance to the cluster via either creation or import.
2043

2044
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2045
  @param opts: the command line options selected by the user
2046
  @type args: list
2047
  @param args: should contain only one element, the new instance name
2048
  @rtype: int
2049
  @return: the desired exit code
2050

2051
  """
2052
  instance = args[0]
2053

    
2054
  (pnode, snode) = SplitNodeOption(opts.node)
2055

    
2056
  hypervisor = None
2057
  hvparams = {}
2058
  if opts.hypervisor:
2059
    hypervisor, hvparams = opts.hypervisor
2060

    
2061
  if opts.nics:
2062
    nics = ParseNicOption(opts.nics)
2063
  elif opts.no_nics:
2064
    # no nics
2065
    nics = []
2066
  elif mode == constants.INSTANCE_CREATE:
2067
    # default of one nic, all auto
2068
    nics = [{}]
2069
  else:
2070
    # mode == import
2071
    nics = []
2072

    
2073
  if opts.disk_template == constants.DT_DISKLESS:
2074
    if opts.disks or opts.sd_size is not None:
2075
      raise errors.OpPrereqError("Diskless instance but disk"
2076
                                 " information passed")
2077
    disks = []
2078
  else:
2079
    if (not opts.disks and not opts.sd_size
2080
        and mode == constants.INSTANCE_CREATE):
2081
      raise errors.OpPrereqError("No disk information specified")
2082
    if opts.disks and opts.sd_size is not None:
2083
      raise errors.OpPrereqError("Please use either the '--disk' or"
2084
                                 " '-s' option")
2085
    if opts.sd_size is not None:
2086
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2087

    
2088
    if opts.disks:
2089
      try:
2090
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2091
      except ValueError, err:
2092
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2093
      disks = [{}] * disk_max
2094
    else:
2095
      disks = []
2096
    for didx, ddict in opts.disks:
2097
      didx = int(didx)
2098
      if not isinstance(ddict, dict):
2099
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2100
        raise errors.OpPrereqError(msg)
2101
      elif constants.IDISK_SIZE in ddict:
2102
        if constants.IDISK_ADOPT in ddict:
2103
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2104
                                     " (disk %d)" % didx)
2105
        try:
2106
          ddict[constants.IDISK_SIZE] = \
2107
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2108
        except ValueError, err:
2109
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2110
                                     (didx, err))
2111
      elif constants.IDISK_ADOPT in ddict:
2112
        if mode == constants.INSTANCE_IMPORT:
2113
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2114
                                     " import")
2115
        ddict[constants.IDISK_SIZE] = 0
2116
      else:
2117
        raise errors.OpPrereqError("Missing size or adoption source for"
2118
                                   " disk %d" % didx)
2119
      disks[didx] = ddict
2120

    
2121
  if opts.tags is not None:
2122
    tags = opts.tags.split(',')
2123
  else:
2124
    tags = []
2125

    
2126
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2127
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2128

    
2129
  if mode == constants.INSTANCE_CREATE:
2130
    start = opts.start
2131
    os_type = opts.os
2132
    force_variant = opts.force_variant
2133
    src_node = None
2134
    src_path = None
2135
    no_install = opts.no_install
2136
    identify_defaults = False
2137
  elif mode == constants.INSTANCE_IMPORT:
2138
    start = False
2139
    os_type = None
2140
    force_variant = False
2141
    src_node = opts.src_node
2142
    src_path = opts.src_dir
2143
    no_install = None
2144
    identify_defaults = opts.identify_defaults
2145
  else:
2146
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2147

    
2148
  op = opcodes.OpInstanceCreate(instance_name=instance,
2149
                                disks=disks,
2150
                                disk_template=opts.disk_template,
2151
                                nics=nics,
2152
                                pnode=pnode, snode=snode,
2153
                                ip_check=opts.ip_check,
2154
                                name_check=opts.name_check,
2155
                                wait_for_sync=opts.wait_for_sync,
2156
                                file_storage_dir=opts.file_storage_dir,
2157
                                file_driver=opts.file_driver,
2158
                                iallocator=opts.iallocator,
2159
                                hypervisor=hypervisor,
2160
                                hvparams=hvparams,
2161
                                beparams=opts.beparams,
2162
                                osparams=opts.osparams,
2163
                                mode=mode,
2164
                                start=start,
2165
                                os_type=os_type,
2166
                                force_variant=force_variant,
2167
                                src_node=src_node,
2168
                                src_path=src_path,
2169
                                tags=tags,
2170
                                no_install=no_install,
2171
                                identify_defaults=identify_defaults)
2172

    
2173
  SubmitOrSend(op, opts)
2174
  return 0
2175

    
2176

    
2177
class _RunWhileClusterStoppedHelper:
2178
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2179

2180
  """
2181
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2182
    """Initializes this class.
2183

2184
    @type feedback_fn: callable
2185
    @param feedback_fn: Feedback function
2186
    @type cluster_name: string
2187
    @param cluster_name: Cluster name
2188
    @type master_node: string
2189
    @param master_node: Master node name
2190
    @type online_nodes: list
2191
    @param online_nodes: List of names of online nodes
2192

2193
    """
2194
    self.feedback_fn = feedback_fn
2195
    self.cluster_name = cluster_name
2196
    self.master_node = master_node
2197
    self.online_nodes = online_nodes
2198

    
2199
    self.ssh = ssh.SshRunner(self.cluster_name)
2200

    
2201
    self.nonmaster_nodes = [name for name in online_nodes
2202
                            if name != master_node]
2203

    
2204
    assert self.master_node not in self.nonmaster_nodes
2205

    
2206
  def _RunCmd(self, node_name, cmd):
2207
    """Runs a command on the local or a remote machine.
2208

2209
    @type node_name: string
2210
    @param node_name: Machine name
2211
    @type cmd: list
2212
    @param cmd: Command
2213

2214
    """
2215
    if node_name is None or node_name == self.master_node:
2216
      # No need to use SSH
2217
      result = utils.RunCmd(cmd)
2218
    else:
2219
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2220

    
2221
    if result.failed:
2222
      errmsg = ["Failed to run command %s" % result.cmd]
2223
      if node_name:
2224
        errmsg.append("on node %s" % node_name)
2225
      errmsg.append(": exitcode %s and error %s" %
2226
                    (result.exit_code, result.output))
2227
      raise errors.OpExecError(" ".join(errmsg))
2228

    
2229
  def Call(self, fn, *args):
2230
    """Call function while all daemons are stopped.
2231

2232
    @type fn: callable
2233
    @param fn: Function to be called
2234

2235
    """
2236
    # Pause watcher by acquiring an exclusive lock on watcher state file
2237
    self.feedback_fn("Blocking watcher")
2238
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2239
    try:
2240
      # TODO: Currently, this just blocks. There's no timeout.
2241
      # TODO: Should it be a shared lock?
2242
      watcher_block.Exclusive(blocking=True)
2243

    
2244
      # Stop master daemons, so that no new jobs can come in and all running
2245
      # ones are finished
2246
      self.feedback_fn("Stopping master daemons")
2247
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2248
      try:
2249
        # Stop daemons on all nodes
2250
        for node_name in self.online_nodes:
2251
          self.feedback_fn("Stopping daemons on %s" % node_name)
2252
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2253

    
2254
        # All daemons are shut down now
2255
        try:
2256
          return fn(self, *args)
2257
        except Exception, err:
2258
          _, errmsg = FormatError(err)
2259
          logging.exception("Caught exception")
2260
          self.feedback_fn(errmsg)
2261
          raise
2262
      finally:
2263
        # Start cluster again, master node last
2264
        for node_name in self.nonmaster_nodes + [self.master_node]:
2265
          self.feedback_fn("Starting daemons on %s" % node_name)
2266
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2267
    finally:
2268
      # Resume watcher
2269
      watcher_block.Close()
2270

    
2271

    
2272
def RunWhileClusterStopped(feedback_fn, fn, *args):
2273
  """Calls a function while all cluster daemons are stopped.
2274

2275
  @type feedback_fn: callable
2276
  @param feedback_fn: Feedback function
2277
  @type fn: callable
2278
  @param fn: Function to be called when daemons are stopped
2279

2280
  """
2281
  feedback_fn("Gathering cluster information")
2282

    
2283
  # This ensures we're running on the master daemon
2284
  cl = GetClient()
2285

    
2286
  (cluster_name, master_node) = \
2287
    cl.QueryConfigValues(["cluster_name", "master_node"])
2288

    
2289
  online_nodes = GetOnlineNodes([], cl=cl)
2290

    
2291
  # Don't keep a reference to the client. The master daemon will go away.
2292
  del cl
2293

    
2294
  assert master_node in online_nodes
2295

    
2296
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2297
                                       online_nodes).Call(fn, *args)
2298

    
2299

    
2300
def GenerateTable(headers, fields, separator, data,
2301
                  numfields=None, unitfields=None,
2302
                  units=None):
2303
  """Prints a table with headers and different fields.
2304

2305
  @type headers: dict
2306
  @param headers: dictionary mapping field names to headers for
2307
      the table
2308
  @type fields: list
2309
  @param fields: the field names corresponding to each row in
2310
      the data field
2311
  @param separator: the separator to be used; if this is None,
2312
      the default 'smart' algorithm is used which computes optimal
2313
      field width, otherwise just the separator is used between
2314
      each field
2315
  @type data: list
2316
  @param data: a list of lists, each sublist being one row to be output
2317
  @type numfields: list
2318
  @param numfields: a list with the fields that hold numeric
2319
      values and thus should be right-aligned
2320
  @type unitfields: list
2321
  @param unitfields: a list with the fields that hold numeric
2322
      values that should be formatted with the units field
2323
  @type units: string or None
2324
  @param units: the units we should use for formatting, or None for
2325
      automatic choice (human-readable for non-separator usage, otherwise
2326
      megabytes); this is a one-letter string
2327

2328
  """
2329
  if units is None:
2330
    if separator:
2331
      units = "m"
2332
    else:
2333
      units = "h"
2334

    
2335
  if numfields is None:
2336
    numfields = []
2337
  if unitfields is None:
2338
    unitfields = []
2339

    
2340
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2341
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2342

    
2343
  format_fields = []
2344
  for field in fields:
2345
    if headers and field not in headers:
2346
      # TODO: handle better unknown fields (either revert to old
2347
      # style of raising exception, or deal more intelligently with
2348
      # variable fields)
2349
      headers[field] = field
2350
    if separator is not None:
2351
      format_fields.append("%s")
2352
    elif numfields.Matches(field):
2353
      format_fields.append("%*s")
2354
    else:
2355
      format_fields.append("%-*s")
2356

    
2357
  if separator is None:
2358
    mlens = [0 for name in fields]
2359
    format_str = ' '.join(format_fields)
2360
  else:
2361
    format_str = separator.replace("%", "%%").join(format_fields)
2362

    
2363
  for row in data:
2364
    if row is None:
2365
      continue
2366
    for idx, val in enumerate(row):
2367
      if unitfields.Matches(fields[idx]):
2368
        try:
2369
          val = int(val)
2370
        except (TypeError, ValueError):
2371
          pass
2372
        else:
2373
          val = row[idx] = utils.FormatUnit(val, units)
2374
      val = row[idx] = str(val)
2375
      if separator is None:
2376
        mlens[idx] = max(mlens[idx], len(val))
2377

    
2378
  result = []
2379
  if headers:
2380
    args = []
2381
    for idx, name in enumerate(fields):
2382
      hdr = headers[name]
2383
      if separator is None:
2384
        mlens[idx] = max(mlens[idx], len(hdr))
2385
        args.append(mlens[idx])
2386
      args.append(hdr)
2387
    result.append(format_str % tuple(args))
2388

    
2389
  if separator is None:
2390
    assert len(mlens) == len(fields)
2391

    
2392
    if fields and not numfields.Matches(fields[-1]):
2393
      mlens[-1] = 0
2394

    
2395
  for line in data:
2396
    args = []
2397
    if line is None:
2398
      line = ['-' for _ in fields]
2399
    for idx in range(len(fields)):
2400
      if separator is None:
2401
        args.append(mlens[idx])
2402
      args.append(line[idx])
2403
    result.append(format_str % tuple(args))
2404

    
2405
  return result
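
# Illustrative usage sketch (not part of the original module; headers, field
# names and values are made up). With separator=None the "smart" width
# algorithm is used, and "dtotal" is right-aligned and unit-formatted:
#
#   headers = {"name": "Node", "dtotal": "DTotal"}
#   data = [["node1.example.com", 10240], ["node2.example.com", 20480]]
#   for line in GenerateTable(headers, ["name", "dtotal"], None, data,
#                             numfields=["dtotal"], unitfields=["dtotal"]):
#     ToStdout(line)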
2406

    
2407

    
2408
def _FormatBool(value):
2409
  """Formats a boolean value as a string.
2410

2411
  """
2412
  if value:
2413
    return "Y"
2414
  return "N"
2415

    
2416

    
2417
#: Default formatting for query results; (callback, align right)
2418
_DEFAULT_FORMAT_QUERY = {
2419
  constants.QFT_TEXT: (str, False),
2420
  constants.QFT_BOOL: (_FormatBool, False),
2421
  constants.QFT_NUMBER: (str, True),
2422
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2423
  constants.QFT_OTHER: (str, False),
2424
  constants.QFT_UNKNOWN: (str, False),
2425
  }
2426

    
2427

    
2428
def _GetColumnFormatter(fdef, override, unit):
2429
  """Returns formatting function for a field.
2430

2431
  @type fdef: L{objects.QueryFieldDefinition}
2432
  @type override: dict
2433
  @param override: Dictionary for overriding field formatting functions,
2434
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2435
  @type unit: string
2436
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2437
  @rtype: tuple; (callable, bool)
2438
  @return: Returns the function to format a value (takes one parameter) and a
2439
    boolean for aligning the value on the right-hand side
2440

2441
  """
2442
  fmt = override.get(fdef.name, None)
2443
  if fmt is not None:
2444
    return fmt
2445

    
2446
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2447

    
2448
  if fdef.kind == constants.QFT_UNIT:
2449
    # Can't keep this information in the static dictionary
2450
    return (lambda value: utils.FormatUnit(value, unit), True)
2451

    
2452
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2453
  if fmt is not None:
2454
    return fmt
2455

    
2456
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2457

    
2458

    
2459
class _QueryColumnFormatter:
2460
  """Callable class for formatting fields of a query.
2461

2462
  """
2463
  def __init__(self, fn, status_fn, verbose):
2464
    """Initializes this class.
2465

2466
    @type fn: callable
2467
    @param fn: Formatting function
2468
    @type status_fn: callable
2469
    @param status_fn: Function to report fields' status
2470
    @type verbose: boolean
2471
    @param verbose: whether to use verbose field descriptions or not
2472

2473
    """
2474
    self._fn = fn
2475
    self._status_fn = status_fn
2476
    self._verbose = verbose
2477

    
2478
  def __call__(self, data):
2479
    """Returns a field's string representation.
2480

2481
    """
2482
    (status, value) = data
2483

    
2484
    # Report status
2485
    self._status_fn(status)
2486

    
2487
    if status == constants.RS_NORMAL:
2488
      return self._fn(value)
2489

    
2490
    assert value is None, \
2491
           "Found value %r for abnormal status %s" % (value, status)
2492

    
2493
    return FormatResultError(status, self._verbose)
2494

    
2495

    
2496
def FormatResultError(status, verbose):
2497
  """Formats result status other than L{constants.RS_NORMAL}.
2498

2499
  @param status: The result status
2500
  @type verbose: boolean
2501
  @param verbose: Whether to return the verbose text
2502
  @return: Text of result status
2503

2504
  """
2505
  assert status != constants.RS_NORMAL, \
2506
         "FormatResultError called with status equal to constants.RS_NORMAL"
2507
  try:
2508
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2509
  except KeyError:
2510
    raise NotImplementedError("Unknown status %s" % status)
2511
  else:
2512
    if verbose:
2513
      return verbose_text
2514
    return normal_text
2515

    
2516

    
2517
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2518
                      header=False, verbose=False):
2519
  """Formats data in L{objects.QueryResponse}.
2520

2521
  @type result: L{objects.QueryResponse}
2522
  @param result: result of query operation
2523
  @type unit: string
2524
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2525
    see L{utils.text.FormatUnit}
2526
  @type format_override: dict
2527
  @param format_override: Dictionary for overriding field formatting functions,
2528
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2529
  @type separator: string or None
2530
  @param separator: String used to separate fields
2531
  @type header: bool
2532
  @param header: Whether to output header row
2533
  @type verbose: boolean
2534
  @param verbose: whether to use verbose field descriptions or not
2535

2536
  """
2537
  if unit is None:
2538
    if separator:
2539
      unit = "m"
2540
    else:
2541
      unit = "h"
2542

    
2543
  if format_override is None:
2544
    format_override = {}
2545

    
2546
  stats = dict.fromkeys(constants.RS_ALL, 0)
2547

    
2548
  def _RecordStatus(status):
2549
    if status in stats:
2550
      stats[status] += 1
2551

    
2552
  columns = []
2553
  for fdef in result.fields:
2554
    assert fdef.title and fdef.name
2555
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2556
    columns.append(TableColumn(fdef.title,
2557
                               _QueryColumnFormatter(fn, _RecordStatus,
2558
                                                     verbose),
2559
                               align_right))
2560

    
2561
  table = FormatTable(result.data, columns, header, separator)
2562

    
2563
  # Collect statistics
2564
  assert len(stats) == len(constants.RS_ALL)
2565
  assert compat.all(count >= 0 for count in stats.values())
2566

    
2567
  # Determine overall status. If there was no data, unknown fields must be
2568
  # detected via the field definitions.
2569
  if (stats[constants.RS_UNKNOWN] or
2570
      (not result.data and _GetUnknownFields(result.fields))):
2571
    status = QR_UNKNOWN
2572
  elif compat.any(count > 0 for key, count in stats.items()
2573
                  if key != constants.RS_NORMAL):
2574
    status = QR_INCOMPLETE
2575
  else:
2576
    status = QR_NORMAL
2577

    
2578
  return (status, table)
2579

    
2580

    
2581
def _GetUnknownFields(fdefs):
2582
  """Returns list of unknown fields included in C{fdefs}.
2583

2584
  @type fdefs: list of L{objects.QueryFieldDefinition}
2585

2586
  """
2587
  return [fdef for fdef in fdefs
2588
          if fdef.kind == constants.QFT_UNKNOWN]
2589

    
2590

    
2591
def _WarnUnknownFields(fdefs):
2592
  """Prints a warning to stderr if a query included unknown fields.
2593

2594
  @type fdefs: list of L{objects.QueryFieldDefinition}
2595

2596
  """
2597
  unknown = _GetUnknownFields(fdefs)
2598
  if unknown:
2599
    ToStderr("Warning: Queried for unknown fields %s",
2600
             utils.CommaJoin(fdef.name for fdef in unknown))
2601
    return True
2602

    
2603
  return False
2604

    
2605

    
2606
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2607
                format_override=None, verbose=False, force_filter=False):
2608
  """Generic implementation for listing all items of a resource.
2609

2610
  @param resource: One of L{constants.QR_VIA_LUXI}
2611
  @type fields: list of strings
2612
  @param fields: List of fields to query for
2613
  @type names: list of strings
2614
  @param names: Names of items to query for
2615
  @type unit: string or None
2616
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2617
    None for automatic choice (human-readable for non-separator usage,
2618
    otherwise megabytes); this is a one-letter string
2619
  @type separator: string or None
2620
  @param separator: String used to separate fields
2621
  @type header: bool
2622
  @param header: Whether to show header row
2623
  @type force_filter: bool
2624
  @param force_filter: Whether to always treat names as filter
2625
  @type format_override: dict
2626
  @param format_override: Dictionary for overriding field formatting functions,
2627
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2628
  @type verbose: boolean
2629
  @param verbose: whether to use verbose field descriptions or not
2630

2631
  """
2632
  if cl is None:
2633
    cl = GetClient()
2634

    
2635
  if not names:
2636
    names = None
2637

    
2638
  if (force_filter or
2639
      (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
2640
    try:
2641
      (filter_text, ) = names
2642
    except ValueError:
2643
      raise errors.OpPrereqError("Exactly one argument must be given as a"
2644
                                 " filter")
2645

    
2646
    logging.debug("Parsing '%s' as filter", filter_text)
2647
    filter_ = qlang.ParseFilter(filter_text)
2648
  else:
2649
    filter_ = qlang.MakeSimpleFilter("name", names)
2650

    
2651
  response = cl.Query(resource, fields, filter_)
2652

    
2653
  found_unknown = _WarnUnknownFields(response.fields)
2654

    
2655
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2656
                                     header=header,
2657
                                     format_override=format_override,
2658
                                     verbose=verbose)
2659

    
2660
  for line in data:
2661
    ToStdout(line)
2662

    
2663
  assert ((found_unknown and status == QR_UNKNOWN) or
2664
          (not found_unknown and status != QR_UNKNOWN))
2665

    
2666
  if status == QR_UNKNOWN:
2667
    return constants.EXIT_UNKNOWN_FIELD
2668

    
2669
  # TODO: Should the list command fail if not all data could be collected?
2670
  return constants.EXIT_SUCCESS
2671

    
2672

    
2673
def GenericListFields(resource, fields, separator, header, cl=None):
2674
  """Generic implementation for listing fields for a resource.
2675

2676
  @param resource: One of L{constants.QR_VIA_LUXI}
2677
  @type fields: list of strings
2678
  @param fields: List of fields to query for
2679
  @type separator: string or None
2680
  @param separator: String used to separate fields
2681
  @type header: bool
2682
  @param header: Whether to show header row
2683

2684
  """
2685
  if cl is None:
2686
    cl = GetClient()
2687

    
2688
  if not fields:
2689
    fields = None
2690

    
2691
  response = cl.QueryFields(resource, fields)
2692

    
2693
  found_unknown = _WarnUnknownFields(response.fields)
2694

    
2695
  columns = [
2696
    TableColumn("Name", str, False),
2697
    TableColumn("Title", str, False),
2698
    TableColumn("Description", str, False),
2699
    ]
2700

    
2701
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2702

    
2703
  for line in FormatTable(rows, columns, header, separator):
2704
    ToStdout(line)
2705

    
2706
  if found_unknown:
2707
    return constants.EXIT_UNKNOWN_FIELD
2708

    
2709
  return constants.EXIT_SUCCESS
2710

    
2711

    
2712
class TableColumn:
2713
  """Describes a column for L{FormatTable}.
2714

2715
  """
2716
  def __init__(self, title, fn, align_right):
2717
    """Initializes this class.
2718

2719
    @type title: string
2720
    @param title: Column title
2721
    @type fn: callable
2722
    @param fn: Formatting function
2723
    @type align_right: bool
2724
    @param align_right: Whether to align values on the right-hand side
2725

2726
    """
2727
    self.title = title
2728
    self.format = fn
2729
    self.align_right = align_right
2730

    
2731

    
2732
def _GetColFormatString(width, align_right):
2733
  """Returns the format string for a field.
2734

2735
  """
2736
  if align_right:
2737
    sign = ""
2738
  else:
2739
    sign = "-"
2740

    
2741
  return "%%%s%ss" % (sign, width)
2742

    
2743

    
2744
def FormatTable(rows, columns, header, separator):
2745
  """Formats data as a table.
2746

2747
  @type rows: list of lists
2748
  @param rows: Row data, one list per row
2749
  @type columns: list of L{TableColumn}
2750
  @param columns: Column descriptions
2751
  @type header: bool
2752
  @param header: Whether to show header row
2753
  @type separator: string or None
2754
  @param separator: String used to separate columns
2755

2756
  """
2757
  if header:
2758
    data = [[col.title for col in columns]]
2759
    colwidth = [len(col.title) for col in columns]
2760
  else:
2761
    data = []
2762
    colwidth = [0 for _ in columns]
2763

    
2764
  # Format row data
2765
  for row in rows:
2766
    assert len(row) == len(columns)
2767

    
2768
    formatted = [col.format(value) for value, col in zip(row, columns)]
2769

    
2770
    if separator is None:
2771
      # Update column widths
2772
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2773
        # Modifying a list's items while iterating is fine
2774
        colwidth[idx] = max(oldwidth, len(value))
2775

    
2776
    data.append(formatted)
2777

    
2778
  if separator is not None:
2779
    # Return early if a separator is used
2780
    return [separator.join(row) for row in data]
2781

    
2782
  if columns and not columns[-1].align_right:
2783
    # Avoid unnecessary spaces at end of line
2784
    colwidth[-1] = 0
2785

    
2786
  # Build format string
2787
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2788
                  for col, width in zip(columns, colwidth)])
2789

    
2790
  return [fmt % tuple(row) for row in data]
2791

    
2792

    
2793
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
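
# Illustrative examples (not part of the original module). The date/time part
# is rendered in the local timezone, so the exact string varies:
#
#   FormatTimestamp((1325419200, 123456))  -> e.g. '2012-01-01 12:00:00.123456'
#   FormatTimestamp("not-a-timestamp")     -> '?'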
2807

    
2808

    
2809
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
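
# Illustrative examples (not part of the original module):
#
#   >>> ParseTimespec(30)
#   30
#   >>> ParseTimespec("2h")
#   7200
#   >>> ParseTimespec("1w")
#   604800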
2849

    
2850

    
2851
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2852
                   filter_master=False):
2853
  """Returns the names of online nodes.
2854

2855
  This function will also log a note on stderr with the names of
  any offline nodes that were skipped.
2857

2858
  @param nodes: if not empty, use only this subset of nodes (minus the
2859
      offline ones)
2860
  @param cl: if not None, luxi client to use
2861
  @type nowarn: boolean
2862
  @param nowarn: by default, this function will output a note with the
2863
      offline nodes that are skipped; if this parameter is True the
2864
      note is not displayed
2865
  @type secondary_ips: boolean
2866
  @param secondary_ips: if True, return the secondary IPs instead of the
2867
      names, useful for doing network traffic over the replication interface
2868
      (if any)
2869
  @type filter_master: boolean
2870
  @param filter_master: if True, do not return the master node in the list
2871
      (useful in coordination with secondary_ips where we cannot check our
2872
      node name against the list)
2873

2874
  """
2875
  if cl is None:
2876
    cl = GetClient()
2877

    
2878
  if secondary_ips:
2879
    name_idx = 2
2880
  else:
2881
    name_idx = 0
2882

    
2883
  if filter_master:
2884
    master_node = cl.QueryConfigValues(["master_node"])[0]
2885
    filter_fn = lambda x: x != master_node
2886
  else:
2887
    filter_fn = lambda _: True
2888

    
2889
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2890
                         use_locking=False)
2891
  offline = [row[0] for row in result if row[1]]
2892
  if offline and not nowarn:
2893
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2894
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
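
# Illustrative usage sketch (not part of the original module): the two common
# call patterns, using keyword arguments from the signature above:
#
#   online = GetOnlineNodes([], cl=cl)            # names of all online nodes
#   sips = GetOnlineNodes([], cl=cl, secondary_ips=True, filter_master=True)
#   # secondary IPs of all online nodes except the master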
2895

    
2896

    
2897
def _ToStream(stream, txt, *args):
2898
  """Write a message to a stream, bypassing the logging system
2899

2900
  @type stream: file object
2901
  @param stream: the file to which we should write
2902
  @type txt: str
2903
  @param txt: the message
2904

2905
  """
2906
  try:
2907
    if args:
2908
      args = tuple(args)
2909
      stream.write(txt % args)
2910
    else:
2911
      stream.write(txt)
2912
    stream.write('\n')
2913
    stream.flush()
2914
  except IOError, err:
2915
    if err.errno == errno.EPIPE:
2916
      # our terminal went away, we'll exit
2917
      sys.exit(constants.EXIT_FAILURE)
2918
    else:
2919
      raise
2920

    
2921

    
2922
def ToStdout(txt, *args):
2923
  """Write a message to stdout only, bypassing the logging system
2924

2925
  This is just a wrapper over _ToStream.
2926

2927
  @type txt: str
2928
  @param txt: the message
2929

2930
  """
2931
  _ToStream(sys.stdout, txt, *args)
2932

    
2933

    
2934
def ToStderr(txt, *args):
2935
  """Write a message to stderr only, bypassing the logging system
2936

2937
  This is just a wrapper over _ToStream.
2938

2939
  @type txt: str
2940
  @param txt: the message
2941

2942
  """
2943
  _ToStream(sys.stderr, txt, *args)
2944

    
2945

    
2946
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used when reporting
        on the job (e.g. while waiting for its result)

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # Unlike SubmitManyJobs, SubmitJob returns only the job ID (no
        # success status), but it raises an exception if the submission
        # fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a job that is no longer queued or waiting, to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]

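
# Illustrative sketch, not part of the original module: typical use of
# JobExecutor from a per-instance command. The names ("op_factory",
# "instance_names") and the idea of building one opcode per instance are
# assumptions for illustration; real callers build concrete opcodes from
# the opcodes module.
def _ExampleRunPerInstanceJobs(op_factory, instance_names, opts=None,
                               wait=True):
  """Illustrative sketch: queue one job per instance and collect results.

  @param op_factory: callable taking an instance name and returning the
      opcode for that instance (hypothetical, for the example only)

  """
  jex = JobExecutor(opts=opts, verbose=True)
  for name in instance_names:
    jex.QueueJob(name, op_factory(name))
  if wait:
    # list of (success, result) tuples, in submission order
    return jex.GetResults()
  # only submit the jobs and print their IDs
  return jex.WaitOrShow(False)
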
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the parameters set explicitly for this object
      (its "own" parameters, without inherited defaults)
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))

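
# Illustrative sketch, not part of the original module: how the output of
# FormatParameterDict looks for a small parameter set. The parameter names
# and values are made up for the example.
def _ExampleFormatParameters():
  """Illustrative sketch: render own vs. defaulted parameters.

  """
  own = {"kernel_path": "/boot/vmlinuz-custom"}
  filled = {"kernel_path": "/boot/vmlinuz-custom", "root_path": "/dev/vda1"}
  buf = StringIO()
  FormatParameterDict(buf, own, filled, level=2)
  # With level=2 the buffer now contains (four-space indent):
  #   - kernel_path: /boot/vmlinuz-custom
  #   - root_path: default (/dev/vda1)
  ToStdout(buf.getvalue().rstrip("\n"))
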
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of C{list_type} items.

  This function is used to request confirmation for doing an operation
  on a given list of items (e.g. nodes or instances).

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @type extra: str
  @param extra: additional text to insert into the confirmation message,
      before the final question
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
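

# Illustrative sketch, not part of the original module: guarding a bulk
# operation behind ConfirmOperation, as a command implementation might do.
# The "force" handling, the extra warning text and the instance names are
# assumptions for the example only.
def _ExampleConfirmShutdown(instance_names, force=False):
  """Illustrative sketch: ask before shutting down several instances.

  """
  if not (force or
          ConfirmOperation(instance_names, "instances", "shutdown",
                           extra="This will interrupt running workloads.\n")):
    ToStderr("Operation aborted by user")
    return constants.EXIT_FAILURE
  # ... submit the actual shutdown jobs here, e.g. via JobExecutor ...
  return constants.EXIT_SUCCESS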