lib/cli.py @ aafee533

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
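
# Worked example (added for illustration; the names below are made up):
# with opts.tag_type == constants.TAG_NODE and args == ["node1.example.com",
# "mytag"], this returns (constants.TAG_NODE, "node1.example.com") and leaves
# args == ["mytag"]; for constants.TAG_CLUSTER no name is consumed and the
# kind itself doubles as the name.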


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """optparse custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key: val, key: val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
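
# Worked example (added for illustration; the keys are hypothetical):
#   _SplitKeyVal("-H", "mem=512,no_acpi,-kernel_path")
# returns {"mem": "512", "acpi": False, "kernel_path": None}: key=val pairs
# keep their string value, a bare key maps to True, a "no_" prefix to False,
# a "-" prefix to None, and duplicate keys raise errors.ParameterError.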


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
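
# Worked example (added for illustration; the values are made up):
#   check_ident_key_val(None, "--disk", "0:size=10G,mode=rw")
# returns ("0", {"size": "10G", "mode": "rw"}), while "no_link0" gives
# ("link0", False) and "-param" gives ("param", None); combining key=val
# pairs with the "no_"/"-" prefixes raises errors.ParameterError.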


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggest is normally a list. For dynamic completion we use numeric
# values that do not evaluate to False.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption

_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance"
                            " MAC addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load the new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as a filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# Common options for creating instances. "add" and "import" then add their
# own specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
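
# Sketch of the expected "commands" mapping (the entry shown is illustrative,
# not taken from this module): each value is a tuple of (handler function,
# argument definition, option list, usage suffix, description), e.g.
#   commands = {
#     "list-tags": (ListTags, ARGS_NONE, [], "", "List the cluster tags"),
#     }
# and "aliases" simply maps an alternative command name to one of these keys.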


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
1403

    
1404

    
1405
def SplitNodeOption(value):
1406
  """Splits the value of a --node option.
1407

1408
  """
1409
  if value and ':' in value:
1410
    return value.split(':', 1)
1411
  else:
1412
    return (value, None)
1413
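
# Illustrative examples (not from the original source); node names are
# placeholders.  Note that the two-node form returns a list, while the
# single-node form returns a tuple:
#
#   SplitNodeOption("node1.example.com:node2.example.com")
#   # -> ['node1.example.com', 'node2.example.com']
#   SplitNodeOption("node1.example.com")
#   # -> ('node1.example.com', None)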

    
1414

    
1415
def CalculateOSNames(os_name, os_variants):
1416
  """Calculates all the names an OS can be called, according to its variants.
1417

1418
  @type os_name: string
1419
  @param os_name: base name of the os
1420
  @type os_variants: list or None
1421
  @param os_variants: list of supported variants
1422
  @rtype: list
1423
  @return: list of valid names
1424

1425
  """
1426
  if os_variants:
1427
    return ['%s+%s' % (os_name, v) for v in os_variants]
1428
  else:
1429
    return [os_name]
1430
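
# Illustrative examples (not from the original source); the OS name and
# variant names are placeholders:
#
#   CalculateOSNames("debootstrap", ["default", "minimal"])
#   # -> ['debootstrap+default', 'debootstrap+minimal']
#   CalculateOSNames("debootstrap", None)
#   # -> ['debootstrap']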

    
1431

    
1432
def ParseFields(selected, default):
1433
  """Parses the values of "--field"-like options.
1434

1435
  @type selected: string or None
1436
  @param selected: User-selected options
1437
  @type default: list
1438
  @param default: Default fields
1439

1440
  """
1441
  if selected is None:
1442
    return default
1443

    
1444
  if selected.startswith("+"):
1445
    return default + selected[1:].split(",")
1446

    
1447
  return selected.split(",")
1448
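
# Illustrative examples (not from the original source); field names are
# placeholders:
#
#   ParseFields(None, ["name", "status"])
#   # -> ['name', 'status']
#   ParseFields("+oper_ram", ["name", "status"])
#   # -> ['name', 'status', 'oper_ram']
#   ParseFields("name,pnode", ["name", "status"])
#   # -> ['name', 'pnode']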

    
1449

    
1450
UsesRPC = rpc.RunWithRPC
1451

    
1452

    
1453
def AskUser(text, choices=None):
1454
  """Ask the user a question.
1455

1456
  @param text: the question to ask
1457

1458
  @param choices: list of tuples (input_char, return_value,
1459
      description); if not given, it will default to: [('y', True,
1460
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
1461
      note that the '?' char is reserved for help
1462

1463
  @return: one of the return values from the choices list; if input is
1464
      not possible (i.e. not running with a tty), we return the last
1465
      entry from the list
1466

1467
  """
1468
  if choices is None:
1469
    choices = [('y', True, 'Perform the operation'),
1470
               ('n', False, 'Do not perform the operation')]
1471
  if not choices or not isinstance(choices, list):
1472
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1473
  for entry in choices:
1474
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1475
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1476

    
1477
  answer = choices[-1][1]
1478
  new_text = []
1479
  for line in text.splitlines():
1480
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1481
  text = "\n".join(new_text)
1482
  try:
1483
    f = file("/dev/tty", "a+")
1484
  except IOError:
1485
    return answer
1486
  try:
1487
    chars = [entry[0] for entry in choices]
1488
    chars[-1] = "[%s]" % chars[-1]
1489
    chars.append('?')
1490
    maps = dict([(entry[0], entry[1]) for entry in choices])
1491
    while True:
1492
      f.write(text)
1493
      f.write('\n')
1494
      f.write("/".join(chars))
1495
      f.write(": ")
1496
      line = f.readline(2).strip().lower()
1497
      if line in maps:
1498
        answer = maps[line]
1499
        break
1500
      elif line == '?':
1501
        for entry in choices:
1502
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1503
        f.write("\n")
1504
        continue
1505
  finally:
1506
    f.close()
1507
  return answer
1508
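
# Usage sketch (not part of the original module): callers may pass custom
# choices; the last entry doubles as the default answer when no tty is
# available and is shown in brackets in the prompt.  The choice values
# below are made up.
#
#   choices = [("y", True, "Perform the operation"),
#              ("a", "all", "Perform the operation on all items"),
#              ("n", False, "Do not perform the operation")]
#   answer = AskUser("Continue?", choices)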

    
1509

    
1510
class JobSubmittedException(Exception):
1511
  """Job was submitted, client should exit.
1512

1513
  This exception has one argument, the ID of the job that was
1514
  submitted. The handler should print this ID.
1515

1516
  This is not an error, just a structured way to exit from clients.
1517

1518
  """
1519

    
1520

    
1521
def SendJob(ops, cl=None):
1522
  """Function to submit an opcode without waiting for the results.
1523

1524
  @type ops: list
1525
  @param ops: list of opcodes
1526
  @type cl: luxi.Client
1527
  @param cl: the luxi client to use for communicating with the master;
1528
             if None, a new client will be created
1529

1530
  """
1531
  if cl is None:
1532
    cl = GetClient()
1533

    
1534
  job_id = cl.SubmitJob(ops)
1535

    
1536
  return job_id
1537

    
1538

    
1539
def GenericPollJob(job_id, cbs, report_cbs):
1540
  """Generic job-polling function.
1541

1542
  @type job_id: number
1543
  @param job_id: Job ID
1544
  @type cbs: Instance of L{JobPollCbBase}
1545
  @param cbs: Data callbacks
1546
  @type report_cbs: Instance of L{JobPollReportCbBase}
1547
  @param report_cbs: Reporting callbacks
1548

1549
  """
1550
  prev_job_info = None
1551
  prev_logmsg_serial = None
1552

    
1553
  status = None
1554

    
1555
  while True:
1556
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1557
                                      prev_logmsg_serial)
1558
    if not result:
1559
      # job not found, go away!
1560
      raise errors.JobLost("Job with id %s lost" % job_id)
1561

    
1562
    if result == constants.JOB_NOTCHANGED:
1563
      report_cbs.ReportNotChanged(job_id, status)
1564

    
1565
      # Wait again
1566
      continue
1567

    
1568
    # Split result, a tuple of (field values, log entries)
1569
    (job_info, log_entries) = result
1570
    (status, ) = job_info
1571

    
1572
    if log_entries:
1573
      for log_entry in log_entries:
1574
        (serial, timestamp, log_type, message) = log_entry
1575
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1576
                                    log_type, message)
1577
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1578

    
1579
    # TODO: Handle canceled and archived jobs
1580
    elif status in (constants.JOB_STATUS_SUCCESS,
1581
                    constants.JOB_STATUS_ERROR,
1582
                    constants.JOB_STATUS_CANCELING,
1583
                    constants.JOB_STATUS_CANCELED):
1584
      break
1585

    
1586
    prev_job_info = job_info
1587

    
1588
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1589
  if not jobs:
1590
    raise errors.JobLost("Job with id %s lost" % job_id)
1591

    
1592
  status, opstatus, result = jobs[0]
1593

    
1594
  if status == constants.JOB_STATUS_SUCCESS:
1595
    return result
1596

    
1597
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1598
    raise errors.OpExecError("Job was canceled")
1599

    
1600
  has_ok = False
1601
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1602
    if status == constants.OP_STATUS_SUCCESS:
1603
      has_ok = True
1604
    elif status == constants.OP_STATUS_ERROR:
1605
      errors.MaybeRaise(msg)
1606

    
1607
      if has_ok:
1608
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1609
                                 (idx, msg))
1610

    
1611
      raise errors.OpExecError(str(msg))
1612

    
1613
  # default failure mode
1614
  raise errors.OpExecError(result)
1615

    
1616

    
1617
class JobPollCbBase:
1618
  """Base class for L{GenericPollJob} callbacks.
1619

1620
  """
1621
  def __init__(self):
1622
    """Initializes this class.
1623

1624
    """
1625

    
1626
  def WaitForJobChangeOnce(self, job_id, fields,
1627
                           prev_job_info, prev_log_serial):
1628
    """Waits for changes on a job.
1629

1630
    """
1631
    raise NotImplementedError()
1632

    
1633
  def QueryJobs(self, job_ids, fields):
1634
    """Returns the selected fields for the selected job IDs.
1635

1636
    @type job_ids: list of numbers
1637
    @param job_ids: Job IDs
1638
    @type fields: list of strings
1639
    @param fields: Fields
1640

1641
    """
1642
    raise NotImplementedError()
1643

    
1644

    
1645
class JobPollReportCbBase:
1646
  """Base class for L{GenericPollJob} reporting callbacks.
1647

1648
  """
1649
  def __init__(self):
1650
    """Initializes this class.
1651

1652
    """
1653

    
1654
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1655
    """Handles a log message.
1656

1657
    """
1658
    raise NotImplementedError()
1659

    
1660
  def ReportNotChanged(self, job_id, status):
1661
    """Called for if a job hasn't changed in a while.
1662

1663
    @type job_id: number
1664
    @param job_id: Job ID
1665
    @type status: string or None
1666
    @param status: Job status if available
1667

1668
    """
1669
    raise NotImplementedError()
1670

    
1671

    
1672
class _LuxiJobPollCb(JobPollCbBase):
1673
  def __init__(self, cl):
1674
    """Initializes this class.
1675

1676
    """
1677
    JobPollCbBase.__init__(self)
1678
    self.cl = cl
1679

    
1680
  def WaitForJobChangeOnce(self, job_id, fields,
1681
                           prev_job_info, prev_log_serial):
1682
    """Waits for changes on a job.
1683

1684
    """
1685
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1686
                                        prev_job_info, prev_log_serial)
1687

    
1688
  def QueryJobs(self, job_ids, fields):
1689
    """Returns the selected fields for the selected job IDs.
1690

1691
    """
1692
    return self.cl.QueryJobs(job_ids, fields)
1693

    
1694

    
1695
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1696
  def __init__(self, feedback_fn):
1697
    """Initializes this class.
1698

1699
    """
1700
    JobPollReportCbBase.__init__(self)
1701

    
1702
    self.feedback_fn = feedback_fn
1703

    
1704
    assert callable(feedback_fn)
1705

    
1706
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1707
    """Handles a log message.
1708

1709
    """
1710
    self.feedback_fn((timestamp, log_type, log_msg))
1711

    
1712
  def ReportNotChanged(self, job_id, status):
1713
    """Called if a job hasn't changed in a while.
1714

1715
    """
1716
    # Ignore
1717

    
1718

    
1719
class StdioJobPollReportCb(JobPollReportCbBase):
1720
  def __init__(self):
1721
    """Initializes this class.
1722

1723
    """
1724
    JobPollReportCbBase.__init__(self)
1725

    
1726
    self.notified_queued = False
1727
    self.notified_waitlock = False
1728

    
1729
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1730
    """Handles a log message.
1731

1732
    """
1733
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1734
             FormatLogMessage(log_type, log_msg))
1735

    
1736
  def ReportNotChanged(self, job_id, status):
1737
    """Called if a job hasn't changed in a while.
1738

1739
    """
1740
    if status is None:
1741
      return
1742

    
1743
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1744
      ToStderr("Job %s is waiting in queue", job_id)
1745
      self.notified_queued = True
1746

    
1747
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1748
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1749
      self.notified_waitlock = True
1750

    
1751

    
1752
def FormatLogMessage(log_type, log_msg):
1753
  """Formats a job message according to its type.
1754

1755
  """
1756
  if log_type != constants.ELOG_MESSAGE:
1757
    log_msg = str(log_msg)
1758

    
1759
  return utils.SafeEncode(log_msg)
1760

    
1761

    
1762
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1763
  """Function to poll for the result of a job.
1764

1765
  @type job_id: job identifier
1766
  @param job_id: the job to poll for results
1767
  @type cl: luxi.Client
1768
  @param cl: the luxi client to use for communicating with the master;
1769
             if None, a new client will be created
1770

1771
  """
1772
  if cl is None:
1773
    cl = GetClient()
1774

    
1775
  if reporter is None:
1776
    if feedback_fn:
1777
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1778
    else:
1779
      reporter = StdioJobPollReportCb()
1780
  elif feedback_fn:
1781
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1782

    
1783
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1784
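
# Usage sketch (illustrative, not part of the original module): submit a
# job without waiting, then poll it later; "op" stands for an opcode
# instance built elsewhere.
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   ...
#   results = PollJob(job_id, cl=cl)  # reports via StdioJobPollReportCb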

    
1785

    
1786
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1787
  """Legacy function to submit an opcode.
1788

1789
  This is just a simple wrapper over the construction of the processor
1790
  instance. It should be extended to better handle feedback and
1791
  interaction functions.
1792

1793
  """
1794
  if cl is None:
1795
    cl = GetClient()
1796

    
1797
  SetGenericOpcodeOpts([op], opts)
1798

    
1799
  job_id = SendJob([op], cl=cl)
1800

    
1801
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1802
                       reporter=reporter)
1803

    
1804
  return op_results[0]
1805

    
1806

    
1807
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1808
  """Wrapper around SubmitOpCode or SendJob.
1809

1810
  This function will decide, based on the 'opts' parameter, whether to
1811
  submit and wait for the result of the opcode (and return it), or
1812
  whether to just send the job and print its identifier. It is used in
1813
  order to simplify the implementation of the '--submit' option.
1814

1815
  It will also process the opcodes if we're sending them via SendJob
1816
  (otherwise SubmitOpCode does it).
1817

1818
  """
1819
  if opts and opts.submit_only:
1820
    job = [op]
1821
    SetGenericOpcodeOpts(job, opts)
1822
    job_id = SendJob(job, cl=cl)
1823
    raise JobSubmittedException(job_id)
1824
  else:
1825
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1826

    
1827

    
1828
def SetGenericOpcodeOpts(opcode_list, options):
1829
  """Processor for generic options.
1830

1831
  This function updates the given opcodes based on generic command
1832
  line options (like debug, dry-run, etc.).
1833

1834
  @param opcode_list: list of opcodes
1835
  @param options: command line options or None
1836
  @return: None (in-place modification)
1837

1838
  """
1839
  if not options:
1840
    return
1841
  for op in opcode_list:
1842
    op.debug_level = options.debug
1843
    if hasattr(options, "dry_run"):
1844
      op.dry_run = options.dry_run
1845
    if getattr(options, "priority", None) is not None:
1846
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1847

    
1848

    
1849
def GetClient():
1850
  # TODO: Cache object?
1851
  try:
1852
    client = luxi.Client()
1853
  except luxi.NoMasterError:
1854
    ss = ssconf.SimpleStore()
1855

    
1856
    # Try to read ssconf file
1857
    try:
1858
      ss.GetMasterNode()
1859
    except errors.ConfigurationError:
1860
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1861
                                 " not part of a cluster")
1862

    
1863
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1864
    if master != myself:
1865
      raise errors.OpPrereqError("This is not the master node, please connect"
1866
                                 " to node '%s' and rerun the command" %
1867
                                 master)
1868
    raise
1869
  return client
1870

    
1871

    
1872
def FormatError(err):
1873
  """Return a formatted error message for a given error.
1874

1875
  This function takes an exception instance and returns a tuple
1876
  consisting of two values: first, the recommended exit code, and
1877
  second, a string describing the error message (not
1878
  newline-terminated).
1879

1880
  """
1881
  retcode = 1
1882
  obuf = StringIO()
1883
  msg = str(err)
1884
  if isinstance(err, errors.ConfigurationError):
1885
    txt = "Corrupt configuration file: %s" % msg
1886
    logging.error(txt)
1887
    obuf.write(txt + "\n")
1888
    obuf.write("Aborting.")
1889
    retcode = 2
1890
  elif isinstance(err, errors.HooksAbort):
1891
    obuf.write("Failure: hooks execution failed:\n")
1892
    for node, script, out in err.args[0]:
1893
      if out:
1894
        obuf.write("  node: %s, script: %s, output: %s\n" %
1895
                   (node, script, out))
1896
      else:
1897
        obuf.write("  node: %s, script: %s (no output)\n" %
1898
                   (node, script))
1899
  elif isinstance(err, errors.HooksFailure):
1900
    obuf.write("Failure: hooks general failure: %s" % msg)
1901
  elif isinstance(err, errors.ResolverError):
1902
    this_host = netutils.Hostname.GetSysName()
1903
    if err.args[0] == this_host:
1904
      msg = "Failure: can't resolve my own hostname ('%s')"
1905
    else:
1906
      msg = "Failure: can't resolve hostname '%s'"
1907
    obuf.write(msg % err.args[0])
1908
  elif isinstance(err, errors.OpPrereqError):
1909
    if len(err.args) == 2:
1910
      obuf.write("Failure: prerequisites not met for this"
1911
               " operation:\nerror type: %s, error details:\n%s" %
1912
                 (err.args[1], err.args[0]))
1913
    else:
1914
      obuf.write("Failure: prerequisites not met for this"
1915
                 " operation:\n%s" % msg)
1916
  elif isinstance(err, errors.OpExecError):
1917
    obuf.write("Failure: command execution error:\n%s" % msg)
1918
  elif isinstance(err, errors.TagError):
1919
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1920
  elif isinstance(err, errors.JobQueueDrainError):
1921
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1922
               " accept new requests\n")
1923
  elif isinstance(err, errors.JobQueueFull):
1924
    obuf.write("Failure: the job queue is full and doesn't accept new"
1925
               " job submissions until old jobs are archived\n")
1926
  elif isinstance(err, errors.TypeEnforcementError):
1927
    obuf.write("Parameter Error: %s" % msg)
1928
  elif isinstance(err, errors.ParameterError):
1929
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1930
  elif isinstance(err, luxi.NoMasterError):
1931
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1932
               " and listening for connections?")
1933
  elif isinstance(err, luxi.TimeoutError):
1934
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1935
               " been submitted and will continue to run even if the call"
1936
               " timed out. Useful commands in this situation are \"gnt-job"
1937
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1938
    obuf.write(msg)
1939
  elif isinstance(err, luxi.PermissionError):
1940
    obuf.write("It seems you don't have permissions to connect to the"
1941
               " master daemon.\nPlease retry as a different user.")
1942
  elif isinstance(err, luxi.ProtocolError):
1943
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1944
               "%s" % msg)
1945
  elif isinstance(err, errors.JobLost):
1946
    obuf.write("Error checking job status: %s" % msg)
1947
  elif isinstance(err, errors.QueryFilterParseError):
1948
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
1949
    obuf.write("\n".join(err.GetDetails()))
1950
  elif isinstance(err, errors.GenericError):
1951
    obuf.write("Unhandled Ganeti error: %s" % msg)
1952
  elif isinstance(err, JobSubmittedException):
1953
    obuf.write("JobID: %s\n" % err.args[0])
1954
    retcode = 0
1955
  else:
1956
    obuf.write("Unhandled exception: %s" % msg)
1957
  return retcode, obuf.getvalue().rstrip('\n')
1958

    
1959

    
1960
def GenericMain(commands, override=None, aliases=None):
1961
  """Generic main function for all the gnt-* commands.
1962

1963
  Arguments:
1964
    - commands: a dictionary with a special structure, see the design doc
1965
                for command line handling.
1966
    - override: if not None, we expect a dictionary with keys that will
1967
                override command line options; this can be used to pass
1968
                options from the scripts to generic functions
1969
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1970

1971
  """
1972
  # save the program name and the entire command line for later logging
1973
  if sys.argv:
1974
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1975
    if len(sys.argv) >= 2:
1976
      binary += " " + sys.argv[1]
1977
      old_cmdline = " ".join(sys.argv[2:])
1978
    else:
1979
      old_cmdline = ""
1980
  else:
1981
    binary = "<unknown program>"
1982
    old_cmdline = ""
1983

    
1984
  if aliases is None:
1985
    aliases = {}
1986

    
1987
  try:
1988
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1989
  except errors.ParameterError, err:
1990
    result, err_msg = FormatError(err)
1991
    ToStderr(err_msg)
1992
    return 1
1993

    
1994
  if func is None: # parse error
1995
    return 1
1996

    
1997
  if override is not None:
1998
    for key, val in override.iteritems():
1999
      setattr(options, key, val)
2000

    
2001
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
2002
                     stderr_logging=True)
2003

    
2004
  if old_cmdline:
2005
    logging.info("run with arguments '%s'", old_cmdline)
2006
  else:
2007
    logging.info("run with no arguments")
2008

    
2009
  try:
2010
    result = func(options, args)
2011
  except (errors.GenericError, luxi.ProtocolError,
2012
          JobSubmittedException), err:
2013
    result, err_msg = FormatError(err)
2014
    logging.exception("Error during command processing")
2015
    ToStderr(err_msg)
2016
  except KeyboardInterrupt:
2017
    result = constants.EXIT_FAILURE
2018
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2019
             " might have been submitted and"
2020
             " will continue to run in the background.")
2021
  except IOError, err:
2022
    if err.errno == errno.EPIPE:
2023
      # our terminal went away, we'll exit
2024
      sys.exit(constants.EXIT_FAILURE)
2025
    else:
2026
      raise
2027

    
2028
  return result
2029

    
2030

    
2031
def ParseNicOption(optvalue):
2032
  """Parses the value of the --net option(s).
2033

2034
  """
2035
  try:
2036
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2037
  except (TypeError, ValueError), err:
2038
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2039

    
2040
  nics = [{}] * nic_max
2041
  for nidx, ndict in optvalue:
2042
    nidx = int(nidx)
2043

    
2044
    if not isinstance(ndict, dict):
2045
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2046
                                 " got %s" % (nidx, ndict))
2047

    
2048
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2049

    
2050
    nics[nidx] = ndict
2051

    
2052
  return nics
2053
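
# Illustrative example (not from the original source): the option value is
# a list of (index, dict) pairs as produced by the "--net" option parser;
# "link"/"mode" are assumed here to be accepted NIC parameters.
#
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#   # -> [{'link': 'br0'}, {}, {'mode': 'routed'}]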

    
2054

    
2055
def GenericInstanceCreate(mode, opts, args):
2056
  """Add an instance to the cluster via either creation or import.
2057

2058
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2059
  @param opts: the command line options selected by the user
2060
  @type args: list
2061
  @param args: should contain only one element, the new instance name
2062
  @rtype: int
2063
  @return: the desired exit code
2064

2065
  """
2066
  instance = args[0]
2067

    
2068
  (pnode, snode) = SplitNodeOption(opts.node)
2069

    
2070
  hypervisor = None
2071
  hvparams = {}
2072
  if opts.hypervisor:
2073
    hypervisor, hvparams = opts.hypervisor
2074

    
2075
  if opts.nics:
2076
    nics = ParseNicOption(opts.nics)
2077
  elif opts.no_nics:
2078
    # no nics
2079
    nics = []
2080
  elif mode == constants.INSTANCE_CREATE:
2081
    # default of one nic, all auto
2082
    nics = [{}]
2083
  else:
2084
    # mode == import
2085
    nics = []
2086

    
2087
  if opts.disk_template == constants.DT_DISKLESS:
2088
    if opts.disks or opts.sd_size is not None:
2089
      raise errors.OpPrereqError("Diskless instance but disk"
2090
                                 " information passed")
2091
    disks = []
2092
  else:
2093
    if (not opts.disks and not opts.sd_size
2094
        and mode == constants.INSTANCE_CREATE):
2095
      raise errors.OpPrereqError("No disk information specified")
2096
    if opts.disks and opts.sd_size is not None:
2097
      raise errors.OpPrereqError("Please use either the '--disk' or"
2098
                                 " '-s' option")
2099
    if opts.sd_size is not None:
2100
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2101

    
2102
    if opts.disks:
2103
      try:
2104
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2105
      except ValueError, err:
2106
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2107
      disks = [{}] * disk_max
2108
    else:
2109
      disks = []
2110
    for didx, ddict in opts.disks:
2111
      didx = int(didx)
2112
      if not isinstance(ddict, dict):
2113
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2114
        raise errors.OpPrereqError(msg)
2115
      elif constants.IDISK_SIZE in ddict:
2116
        if constants.IDISK_ADOPT in ddict:
2117
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2118
                                     " (disk %d)" % didx)
2119
        try:
2120
          ddict[constants.IDISK_SIZE] = \
2121
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2122
        except ValueError, err:
2123
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2124
                                     (didx, err))
2125
      elif constants.IDISK_ADOPT in ddict:
2126
        if mode == constants.INSTANCE_IMPORT:
2127
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2128
                                     " import")
2129
        ddict[constants.IDISK_SIZE] = 0
2130
      else:
2131
        raise errors.OpPrereqError("Missing size or adoption source for"
2132
                                   " disk %d" % didx)
2133
      disks[didx] = ddict
2134

    
2135
  if opts.tags is not None:
2136
    tags = opts.tags.split(",")
2137
  else:
2138
    tags = []
2139

    
2140
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2141
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2142

    
2143
  if mode == constants.INSTANCE_CREATE:
2144
    start = opts.start
2145
    os_type = opts.os
2146
    force_variant = opts.force_variant
2147
    src_node = None
2148
    src_path = None
2149
    no_install = opts.no_install
2150
    identify_defaults = False
2151
  elif mode == constants.INSTANCE_IMPORT:
2152
    start = False
2153
    os_type = None
2154
    force_variant = False
2155
    src_node = opts.src_node
2156
    src_path = opts.src_dir
2157
    no_install = None
2158
    identify_defaults = opts.identify_defaults
2159
  else:
2160
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2161

    
2162
  op = opcodes.OpInstanceCreate(instance_name=instance,
2163
                                disks=disks,
2164
                                disk_template=opts.disk_template,
2165
                                nics=nics,
2166
                                pnode=pnode, snode=snode,
2167
                                ip_check=opts.ip_check,
2168
                                name_check=opts.name_check,
2169
                                wait_for_sync=opts.wait_for_sync,
2170
                                file_storage_dir=opts.file_storage_dir,
2171
                                file_driver=opts.file_driver,
2172
                                iallocator=opts.iallocator,
2173
                                hypervisor=hypervisor,
2174
                                hvparams=hvparams,
2175
                                beparams=opts.beparams,
2176
                                osparams=opts.osparams,
2177
                                mode=mode,
2178
                                start=start,
2179
                                os_type=os_type,
2180
                                force_variant=force_variant,
2181
                                src_node=src_node,
2182
                                src_path=src_path,
2183
                                tags=tags,
2184
                                no_install=no_install,
2185
                                identify_defaults=identify_defaults)
2186

    
2187
  SubmitOrSend(op, opts)
2188
  return 0
2189

    
2190

    
2191
class _RunWhileClusterStoppedHelper:
2192
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2193

2194
  """
2195
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2196
    """Initializes this class.
2197

2198
    @type feedback_fn: callable
2199
    @param feedback_fn: Feedback function
2200
    @type cluster_name: string
2201
    @param cluster_name: Cluster name
2202
    @type master_node: string
2203
    @param master_node: Master node name
2204
    @type online_nodes: list
2205
    @param online_nodes: List of names of online nodes
2206

2207
    """
2208
    self.feedback_fn = feedback_fn
2209
    self.cluster_name = cluster_name
2210
    self.master_node = master_node
2211
    self.online_nodes = online_nodes
2212

    
2213
    self.ssh = ssh.SshRunner(self.cluster_name)
2214

    
2215
    self.nonmaster_nodes = [name for name in online_nodes
2216
                            if name != master_node]
2217

    
2218
    assert self.master_node not in self.nonmaster_nodes
2219

    
2220
  def _RunCmd(self, node_name, cmd):
2221
    """Runs a command on the local or a remote machine.
2222

2223
    @type node_name: string
2224
    @param node_name: Machine name
2225
    @type cmd: list
2226
    @param cmd: Command
2227

2228
    """
2229
    if node_name is None or node_name == self.master_node:
2230
      # No need to use SSH
2231
      result = utils.RunCmd(cmd)
2232
    else:
2233
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2234

    
2235
    if result.failed:
2236
      errmsg = ["Failed to run command %s" % result.cmd]
2237
      if node_name:
2238
        errmsg.append("on node %s" % node_name)
2239
      errmsg.append(": exitcode %s and error %s" %
2240
                    (result.exit_code, result.output))
2241
      raise errors.OpExecError(" ".join(errmsg))
2242

    
2243
  def Call(self, fn, *args):
2244
    """Call function while all daemons are stopped.
2245

2246
    @type fn: callable
2247
    @param fn: Function to be called
2248

2249
    """
2250
    # Pause watcher by acquiring an exclusive lock on watcher state file
2251
    self.feedback_fn("Blocking watcher")
2252
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2253
    try:
2254
      # TODO: Currently, this just blocks. There's no timeout.
2255
      # TODO: Should it be a shared lock?
2256
      watcher_block.Exclusive(blocking=True)
2257

    
2258
      # Stop master daemons, so that no new jobs can come in and all running
2259
      # ones are finished
2260
      self.feedback_fn("Stopping master daemons")
2261
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2262
      try:
2263
        # Stop daemons on all nodes
2264
        for node_name in self.online_nodes:
2265
          self.feedback_fn("Stopping daemons on %s" % node_name)
2266
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2267

    
2268
        # All daemons are shut down now
2269
        try:
2270
          return fn(self, *args)
2271
        except Exception, err:
2272
          _, errmsg = FormatError(err)
2273
          logging.exception("Caught exception")
2274
          self.feedback_fn(errmsg)
2275
          raise
2276
      finally:
2277
        # Start cluster again, master node last
2278
        for node_name in self.nonmaster_nodes + [self.master_node]:
2279
          self.feedback_fn("Starting daemons on %s" % node_name)
2280
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2281
    finally:
2282
      # Resume watcher
2283
      watcher_block.Close()
2284

    
2285

    
2286
def RunWhileClusterStopped(feedback_fn, fn, *args):
2287
  """Calls a function while all cluster daemons are stopped.
2288

2289
  @type feedback_fn: callable
2290
  @param feedback_fn: Feedback function
2291
  @type fn: callable
2292
  @param fn: Function to be called when daemons are stopped
2293

2294
  """
2295
  feedback_fn("Gathering cluster information")
2296

    
2297
  # This ensures we're running on the master daemon
2298
  cl = GetClient()
2299

    
2300
  (cluster_name, master_node) = \
2301
    cl.QueryConfigValues(["cluster_name", "master_node"])
2302

    
2303
  online_nodes = GetOnlineNodes([], cl=cl)
2304

    
2305
  # Don't keep a reference to the client. The master daemon will go away.
2306
  del cl
2307

    
2308
  assert master_node in online_nodes
2309

    
2310
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2311
                                       online_nodes).Call(fn, *args)
2312

    
2313

    
2314
def GenerateTable(headers, fields, separator, data,
2315
                  numfields=None, unitfields=None,
2316
                  units=None):
2317
  """Prints a table with headers and different fields.
2318

2319
  @type headers: dict
2320
  @param headers: dictionary mapping field names to headers for
2321
      the table
2322
  @type fields: list
2323
  @param fields: the field names corresponding to each row in
2324
      the data field
2325
  @param separator: the separator to be used; if this is None,
2326
      the default 'smart' algorithm is used which computes optimal
2327
      field width, otherwise just the separator is used between
2328
      each field
2329
  @type data: list
2330
  @param data: a list of lists, each sublist being one row to be output
2331
  @type numfields: list
2332
  @param numfields: a list with the fields that hold numeric
2333
      values and thus should be right-aligned
2334
  @type unitfields: list
2335
  @param unitfields: a list with the fields that hold numeric
2336
      values that should be formatted with the units field
2337
  @type units: string or None
2338
  @param units: the units we should use for formatting, or None for
2339
      automatic choice (human-readable for non-separator usage, otherwise
2340
      megabytes); this is a one-letter string
2341

2342
  """
2343
  if units is None:
2344
    if separator:
2345
      units = "m"
2346
    else:
2347
      units = "h"
2348

    
2349
  if numfields is None:
2350
    numfields = []
2351
  if unitfields is None:
2352
    unitfields = []
2353

    
2354
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2355
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2356

    
2357
  format_fields = []
2358
  for field in fields:
2359
    if headers and field not in headers:
2360
      # TODO: handle better unknown fields (either revert to old
2361
      # style of raising exception, or deal more intelligently with
2362
      # variable fields)
2363
      headers[field] = field
2364
    if separator is not None:
2365
      format_fields.append("%s")
2366
    elif numfields.Matches(field):
2367
      format_fields.append("%*s")
2368
    else:
2369
      format_fields.append("%-*s")
2370

    
2371
  if separator is None:
2372
    mlens = [0 for name in fields]
2373
    format_str = ' '.join(format_fields)
2374
  else:
2375
    format_str = separator.replace("%", "%%").join(format_fields)
2376

    
2377
  for row in data:
2378
    if row is None:
2379
      continue
2380
    for idx, val in enumerate(row):
2381
      if unitfields.Matches(fields[idx]):
2382
        try:
2383
          val = int(val)
2384
        except (TypeError, ValueError):
2385
          pass
2386
        else:
2387
          val = row[idx] = utils.FormatUnit(val, units)
2388
      val = row[idx] = str(val)
2389
      if separator is None:
2390
        mlens[idx] = max(mlens[idx], len(val))
2391

    
2392
  result = []
2393
  if headers:
2394
    args = []
2395
    for idx, name in enumerate(fields):
2396
      hdr = headers[name]
2397
      if separator is None:
2398
        mlens[idx] = max(mlens[idx], len(hdr))
2399
        args.append(mlens[idx])
2400
      args.append(hdr)
2401
    result.append(format_str % tuple(args))
2402

    
2403
  if separator is None:
2404
    assert len(mlens) == len(fields)
2405

    
2406
    if fields and not numfields.Matches(fields[-1]):
2407
      mlens[-1] = 0
2408

    
2409
  for line in data:
2410
    args = []
2411
    if line is None:
2412
      line = ['-' for _ in fields]
2413
    for idx in range(len(fields)):
2414
      if separator is None:
2415
        args.append(mlens[idx])
2416
      args.append(line[idx])
2417
    result.append(format_str % tuple(args))
2418

    
2419
  return result
2420
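
# Usage sketch (illustrative; field names and data are placeholders):
#
#   headers = {"name": "Node", "dfree": "DFree"}
#   fields = ["name", "dfree"]
#   data = [["node1.example.com", 102400], ["node2.example.com", 51200]]
#   for line in GenerateTable(headers, fields, None, data,
#                             numfields=["dfree"], unitfields=["dfree"]):
#     ToStdout(line)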

    
2421

    
2422
def _FormatBool(value):
2423
  """Formats a boolean value as a string.
2424

2425
  """
2426
  if value:
2427
    return "Y"
2428
  return "N"
2429

    
2430

    
2431
#: Default formatting for query results; (callback, align right)
2432
_DEFAULT_FORMAT_QUERY = {
2433
  constants.QFT_TEXT: (str, False),
2434
  constants.QFT_BOOL: (_FormatBool, False),
2435
  constants.QFT_NUMBER: (str, True),
2436
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2437
  constants.QFT_OTHER: (str, False),
2438
  constants.QFT_UNKNOWN: (str, False),
2439
  }
2440

    
2441

    
2442
def _GetColumnFormatter(fdef, override, unit):
2443
  """Returns formatting function for a field.
2444

2445
  @type fdef: L{objects.QueryFieldDefinition}
2446
  @type override: dict
2447
  @param override: Dictionary for overriding field formatting functions,
2448
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2449
  @type unit: string
2450
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2451
  @rtype: tuple; (callable, bool)
2452
  @return: Returns the function to format a value (takes one parameter) and a
2453
    boolean for aligning the value on the right-hand side
2454

2455
  """
2456
  fmt = override.get(fdef.name, None)
2457
  if fmt is not None:
2458
    return fmt
2459

    
2460
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2461

    
2462
  if fdef.kind == constants.QFT_UNIT:
2463
    # Can't keep this information in the static dictionary
2464
    return (lambda value: utils.FormatUnit(value, unit), True)
2465

    
2466
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2467
  if fmt is not None:
2468
    return fmt
2469

    
2470
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2471

    
2472

    
2473
class _QueryColumnFormatter:
2474
  """Callable class for formatting fields of a query.
2475

2476
  """
2477
  def __init__(self, fn, status_fn, verbose):
2478
    """Initializes this class.
2479

2480
    @type fn: callable
2481
    @param fn: Formatting function
2482
    @type status_fn: callable
2483
    @param status_fn: Function to report fields' status
2484
    @type verbose: boolean
2485
    @param verbose: whether to use verbose field descriptions or not
2486

2487
    """
2488
    self._fn = fn
2489
    self._status_fn = status_fn
2490
    self._verbose = verbose
2491

    
2492
  def __call__(self, data):
2493
    """Returns a field's string representation.
2494

2495
    """
2496
    (status, value) = data
2497

    
2498
    # Report status
2499
    self._status_fn(status)
2500

    
2501
    if status == constants.RS_NORMAL:
2502
      return self._fn(value)
2503

    
2504
    assert value is None, \
2505
           "Found value %r for abnormal status %s" % (value, status)
2506

    
2507
    return FormatResultError(status, self._verbose)
2508

    
2509

    
2510
def FormatResultError(status, verbose):
2511
  """Formats result status other than L{constants.RS_NORMAL}.
2512

2513
  @param status: The result status
2514
  @type verbose: boolean
2515
  @param verbose: Whether to return the verbose text
2516
  @return: Text of result status
2517

2518
  """
2519
  assert status != constants.RS_NORMAL, \
2520
         "FormatResultError called with status equal to constants.RS_NORMAL"
2521
  try:
2522
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2523
  except KeyError:
2524
    raise NotImplementedError("Unknown status %s" % status)
2525
  else:
2526
    if verbose:
2527
      return verbose_text
2528
    return normal_text
2529

    
2530

    
2531
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2532
                      header=False, verbose=False):
2533
  """Formats data in L{objects.QueryResponse}.
2534

2535
  @type result: L{objects.QueryResponse}
2536
  @param result: result of query operation
2537
  @type unit: string
2538
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2539
    see L{utils.text.FormatUnit}
2540
  @type format_override: dict
2541
  @param format_override: Dictionary for overriding field formatting functions,
2542
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2543
  @type separator: string or None
2544
  @param separator: String used to separate fields
2545
  @type header: bool
2546
  @param header: Whether to output header row
2547
  @type verbose: boolean
2548
  @param verbose: whether to use verbose field descriptions or not
2549

2550
  """
2551
  if unit is None:
2552
    if separator:
2553
      unit = "m"
2554
    else:
2555
      unit = "h"
2556

    
2557
  if format_override is None:
2558
    format_override = {}
2559

    
2560
  stats = dict.fromkeys(constants.RS_ALL, 0)
2561

    
2562
  def _RecordStatus(status):
2563
    if status in stats:
2564
      stats[status] += 1
2565

    
2566
  columns = []
2567
  for fdef in result.fields:
2568
    assert fdef.title and fdef.name
2569
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2570
    columns.append(TableColumn(fdef.title,
2571
                               _QueryColumnFormatter(fn, _RecordStatus,
2572
                                                     verbose),
2573
                               align_right))
2574

    
2575
  table = FormatTable(result.data, columns, header, separator)
2576

    
2577
  # Collect statistics
2578
  assert len(stats) == len(constants.RS_ALL)
2579
  assert compat.all(count >= 0 for count in stats.values())
2580

    
2581
  # Determine overall status. If there was no data, unknown fields must be
2582
  # detected via the field definitions.
2583
  if (stats[constants.RS_UNKNOWN] or
2584
      (not result.data and _GetUnknownFields(result.fields))):
2585
    status = QR_UNKNOWN
2586
  elif compat.any(count > 0 for key, count in stats.items()
2587
                  if key != constants.RS_NORMAL):
2588
    status = QR_INCOMPLETE
2589
  else:
2590
    status = QR_NORMAL
2591

    
2592
  return (status, table)
2593

    
2594

    
2595
def _GetUnknownFields(fdefs):
2596
  """Returns list of unknown fields included in C{fdefs}.
2597

2598
  @type fdefs: list of L{objects.QueryFieldDefinition}
2599

2600
  """
2601
  return [fdef for fdef in fdefs
2602
          if fdef.kind == constants.QFT_UNKNOWN]
2603

    
2604

    
2605
def _WarnUnknownFields(fdefs):
2606
  """Prints a warning to stderr if a query included unknown fields.
2607

2608
  @type fdefs: list of L{objects.QueryFieldDefinition}
2609

2610
  """
2611
  unknown = _GetUnknownFields(fdefs)
2612
  if unknown:
2613
    ToStderr("Warning: Queried for unknown fields %s",
2614
             utils.CommaJoin(fdef.name for fdef in unknown))
2615
    return True
2616

    
2617
  return False
2618

    
2619

    
2620
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2621
                format_override=None, verbose=False, force_filter=False):
2622
  """Generic implementation for listing all items of a resource.
2623

2624
  @param resource: One of L{constants.QR_VIA_LUXI}
2625
  @type fields: list of strings
2626
  @param fields: List of fields to query for
2627
  @type names: list of strings
2628
  @param names: Names of items to query for
2629
  @type unit: string or None
2630
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2631
    None for automatic choice (human-readable for non-separator usage,
2632
    otherwise megabytes); this is a one-letter string
2633
  @type separator: string or None
2634
  @param separator: String used to separate fields
2635
  @type header: bool
2636
  @param header: Whether to show header row
2637
  @type force_filter: bool
2638
  @param force_filter: Whether to always treat names as filter
2639
  @type format_override: dict
2640
  @param format_override: Dictionary for overriding field formatting functions,
2641
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2642
  @type verbose: boolean
2643
  @param verbose: whether to use verbose field descriptions or not
2644

2645
  """
2646
  if cl is None:
2647
    cl = GetClient()
2648

    
2649
  if not names:
2650
    names = None
2651

    
2652
  if (force_filter or
2653
      (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
2654
    try:
2655
      (filter_text, ) = names
2656
    except ValueError:
2657
      raise errors.OpPrereqError("Exactly one argument must be given as a"
2658
                                 " filter")
2659

    
2660
    logging.debug("Parsing '%s' as filter", filter_text)
2661
    filter_ = qlang.ParseFilter(filter_text)
2662
  else:
2663
    filter_ = qlang.MakeSimpleFilter("name", names)
2664

    
2665
  response = cl.Query(resource, fields, filter_)
2666

    
2667
  found_unknown = _WarnUnknownFields(response.fields)
2668

    
2669
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2670
                                     header=header,
2671
                                     format_override=format_override,
2672
                                     verbose=verbose)
2673

    
2674
  for line in data:
2675
    ToStdout(line)
2676

    
2677
  assert ((found_unknown and status == QR_UNKNOWN) or
2678
          (not found_unknown and status != QR_UNKNOWN))
2679

    
2680
  if status == QR_UNKNOWN:
2681
    return constants.EXIT_UNKNOWN_FIELD
2682

    
2683
  # TODO: Should the list command fail if not all data could be collected?
2684
  return constants.EXIT_SUCCESS
2685

    
2686

    
2687
def GenericListFields(resource, fields, separator, header, cl=None):
2688
  """Generic implementation for listing fields for a resource.
2689

2690
  @param resource: One of L{constants.QR_VIA_LUXI}
2691
  @type fields: list of strings
2692
  @param fields: List of fields to query for
2693
  @type separator: string or None
2694
  @param separator: String used to separate fields
2695
  @type header: bool
2696
  @param header: Whether to show header row
2697

2698
  """
2699
  if cl is None:
2700
    cl = GetClient()
2701

    
2702
  if not fields:
2703
    fields = None
2704

    
2705
  response = cl.QueryFields(resource, fields)
2706

    
2707
  found_unknown = _WarnUnknownFields(response.fields)
2708

    
2709
  columns = [
2710
    TableColumn("Name", str, False),
2711
    TableColumn("Title", str, False),
2712
    TableColumn("Description", str, False),
2713
    ]
2714

    
2715
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2716

    
2717
  for line in FormatTable(rows, columns, header, separator):
2718
    ToStdout(line)
2719

    
2720
  if found_unknown:
2721
    return constants.EXIT_UNKNOWN_FIELD
2722

    
2723
  return constants.EXIT_SUCCESS
2724

    
2725

    
2726
class TableColumn:
2727
  """Describes a column for L{FormatTable}.
2728

2729
  """
2730
  def __init__(self, title, fn, align_right):
2731
    """Initializes this class.
2732

2733
    @type title: string
2734
    @param title: Column title
2735
    @type fn: callable
2736
    @param fn: Formatting function
2737
    @type align_right: bool
2738
    @param align_right: Whether to align values on the right-hand side
2739

2740
    """
2741
    self.title = title
2742
    self.format = fn
2743
    self.align_right = align_right
2744

    
2745

    
2746
def _GetColFormatString(width, align_right):
2747
  """Returns the format string for a field.
2748

2749
  """
2750
  if align_right:
2751
    sign = ""
2752
  else:
2753
    sign = "-"
2754

    
2755
  return "%%%s%ss" % (sign, width)
2756
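
# Illustrative examples (not from the original source):
#
#   _GetColFormatString(10, False)  # -> '%-10s'
#   _GetColFormatString(10, True)   # -> '%10s'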

    
2757

    
2758
def FormatTable(rows, columns, header, separator):
2759
  """Formats data as a table.
2760

2761
  @type rows: list of lists
2762
  @param rows: Row data, one list per row
2763
  @type columns: list of L{TableColumn}
2764
  @param columns: Column descriptions
2765
  @type header: bool
2766
  @param header: Whether to show header row
2767
  @type separator: string or None
2768
  @param separator: String used to separate columns
2769

2770
  """
2771
  if header:
2772
    data = [[col.title for col in columns]]
2773
    colwidth = [len(col.title) for col in columns]
2774
  else:
2775
    data = []
2776
    colwidth = [0 for _ in columns]
2777

    
2778
  # Format row data
2779
  for row in rows:
2780
    assert len(row) == len(columns)
2781

    
2782
    formatted = [col.format(value) for value, col in zip(row, columns)]
2783

    
2784
    if separator is None:
2785
      # Update column widths
2786
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2787
        # Modifying a list's items while iterating is fine
2788
        colwidth[idx] = max(oldwidth, len(value))
2789

    
2790
    data.append(formatted)
2791

    
2792
  if separator is not None:
2793
    # Return early if a separator is used
2794
    return [separator.join(row) for row in data]
2795

    
2796
  if columns and not columns[-1].align_right:
2797
    # Avoid unnecessary spaces at end of line
2798
    colwidth[-1] = 0
2799

    
2800
  # Build format string
2801
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2802
                  for col, width in zip(columns, colwidth)])
2803

    
2804
  return [fmt % tuple(row) for row in data]
2805
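
# Usage sketch (not part of the original module); column titles and row
# data are made up:
#
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Size", lambda value: utils.FormatUnit(value, "h"), True),
#     ]
#   rows = [["disk0", 1024], ["disk1", 20480]]
#   for line in FormatTable(rows, columns, True, None):
#     ToStdout(line)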

    
2806

    
2807
def FormatTimestamp(ts):
2808
  """Formats a given timestamp.
2809

2810
  @type ts: timestamp
2811
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2812

2813
  @rtype: string
2814
  @return: a string with the formatted timestamp
2815

2816
  """
2817
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2818
    return '?'
2819
  sec, usec = ts
2820
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2821
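
# Illustrative example (not from the original source); the date part
# depends on the local timezone:
#
#   FormatTimestamp((1325376000, 123456))
#   # -> '2012-01-01 00:00:00.123456' (in UTC)
#   FormatTimestamp("bogus")
#   # -> '?'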

    
2822

    
2823
def ParseTimespec(value):
2824
  """Parse a time specification.
2825

2826
  The following suffixes will be recognized:
2827

2828
    - s: seconds
2829
    - m: minutes
2830
    - h: hours
2831
    - d: days
2832
    - w: weeks
2833

2834
  Without any suffix, the value will be taken to be in seconds.
2835

2836
  """
2837
  value = str(value)
2838
  if not value:
2839
    raise errors.OpPrereqError("Empty time specification passed")
2840
  suffix_map = {
2841
    's': 1,
2842
    'm': 60,
2843
    'h': 3600,
2844
    'd': 86400,
2845
    'w': 604800,
2846
    }
2847
  if value[-1] not in suffix_map:
2848
    try:
2849
      value = int(value)
2850
    except (TypeError, ValueError):
2851
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2852
  else:
2853
    multiplier = suffix_map[value[-1]]
2854
    value = value[:-1]
2855
    if not value: # no data left after stripping the suffix
2856
      raise errors.OpPrereqError("Invalid time specification (only"
2857
                                 " suffix passed)")
2858
    try:
2859
      value = int(value) * multiplier
2860
    except (TypeError, ValueError):
2861
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2862
  return value
2863
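
# Illustrative examples (not from the original source):
#
#   ParseTimespec("30")  # -> 30
#   ParseTimespec("2h")  # -> 7200
#   ParseTimespec("1w")  # -> 604800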

    
2864

    
2865
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2866
                   filter_master=False):
2867
  """Returns the names of online nodes.
2868

2869
  This function will also log a warning on stderr with the names of
2870
  the offline nodes that are skipped, unless C{nowarn} is set.
2871

2872
  @param nodes: if not empty, use only this subset of nodes (minus the
2873
      offline ones)
2874
  @param cl: if not None, luxi client to use
2875
  @type nowarn: boolean
2876
  @param nowarn: by default, this function will output a note with the
2877
      offline nodes that are skipped; if this parameter is True the
2878
      note is not displayed
2879
  @type secondary_ips: boolean
2880
  @param secondary_ips: if True, return the secondary IPs instead of the
2881
      names, useful for doing network traffic over the replication interface
2882
      (if any)
2883
  @type filter_master: boolean
2884
  @param filter_master: if True, do not return the master node in the list
2885
      (useful in coordination with secondary_ips where we cannot check our
2886
      node name against the list)
2887

2888
  """
2889
  if cl is None:
2890
    cl = GetClient()
2891

    
2892
  if secondary_ips:
2893
    name_idx = 2
2894
  else:
2895
    name_idx = 0
2896

    
2897
  if filter_master:
2898
    master_node = cl.QueryConfigValues(["master_node"])[0]
2899
    filter_fn = lambda x: x != master_node
2900
  else:
2901
    filter_fn = lambda _: True
2902

    
2903
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2904
                         use_locking=False)
2905
  offline = [row[0] for row in result if row[1]]
2906
  if offline and not nowarn:
2907
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2908
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2909

    
2910

    
2911
def _ToStream(stream, txt, *args):
2912
  """Write a message to a stream, bypassing the logging system
2913

2914
  @type stream: file object
2915
  @param stream: the file to which we should write
2916
  @type txt: str
2917
  @param txt: the message
2918

2919
  """
2920
  try:
2921
    if args:
2922
      args = tuple(args)
2923
      stream.write(txt % args)
2924
    else:
2925
      stream.write(txt)
2926
    stream.write('\n')
2927
    stream.flush()
2928
  except IOError, err:
2929
    if err.errno == errno.EPIPE:
2930
      # our terminal went away, we'll exit
2931
      sys.exit(constants.EXIT_FAILURE)
2932
    else:
2933
      raise
2934

    
2935

    
2936
def ToStdout(txt, *args):
2937
  """Write a message to stdout only, bypassing the logging system
2938

2939
  This is just a wrapper over _ToStream.
2940

2941
  @type txt: str
2942
  @param txt: the message
2943

2944
  """
2945
  _ToStream(sys.stdout, txt, *args)
2946

    
2947

    
2948
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


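# Editor's note, not part of the original module: both wrappers accept either
# a plain string or a format string plus arguments, in the style of the
# logging functions, e.g.:
#
#   ToStdout("Cluster verification finished")
#   ToStdout("Instance %s has %d disks", "inst1.example.com", 2)
#   ToStderr("Failure on node %s: %s", "node1.example.com", "timeout")
#
# If the output pipe goes away (e.g. the command is piped into "head"),
# _ToStream exits with constants.EXIT_FAILURE instead of raising a traceback.

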
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # unlike SubmitManyJobs, SubmitJob returns only the job ID (no
        # success status), but it raises an exception if the submission
        # fails, so we'll notice failures anyway
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


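# Editor's note, not part of the original module: an illustrative sketch of
# typical JobExecutor usage from a client command.  C{build_opcode_fn} and
# the instance names are hypothetical placeholders; only the QueueJob(),
# GetResults() and result-shape details reflect the class above.
def _ExampleRunJobsFor(opts, instance_names, build_opcode_fn):
  jex = JobExecutor(opts=opts)
  for name in instance_names:
    # each queued entry may contain one or more opcodes forming a single job
    jex.QueueJob(name, build_opcode_fn(name))
  # GetResults() submits anything still pending, polls every job and returns
  # a list of (success, result) tuples in submission order
  results = jex.GetResults()
  failed = [result for (success, result) in results if not success]
  if failed:
    return constants.EXIT_FAILURE
  return constants.EXIT_SUCCESS

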
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the parameters explicitly set on the object (without
      inherited defaults)
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))


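# Editor's note, not part of the original module: an illustrative use of
# FormatParameterDict(); the parameter names are made up.
def _ExampleFormatParams():
  own = {"kernel_path": "/boot/vmlinuz-custom"}
  actual = {"kernel_path": "/boot/vmlinuz-custom", "root_path": "/dev/vda1"}
  buf = StringIO()
  FormatParameterDict(buf, own, actual, level=2)
  # buf.getvalue() now reads (indented by four spaces, one line per key,
  # values not set explicitly annotated as defaults):
  #     - kernel_path: /boot/vmlinuz-custom
  #     - root_path: default (/dev/vda1)
  return buf.getvalue()

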
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of items.

  This function is used to request confirmation before performing an
  operation (described by C{text}) on a given list of items of type
  C{list_type}.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
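

# Editor's note, not part of the original module: an illustrative sketch of
# how a command might use ConfirmOperation() before a destructive action;
# the operation text and the exit handling are hypothetical.
def _ExampleConfirmRemoval(instance_names):
  if not ConfirmOperation(instance_names, "instances", "removal"):
    return constants.EXIT_FAILURE
  # ... proceed with the actual removal here ...
  return constants.EXIT_SUCCESS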