Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ 0aee8ee9

History | View | Annotate | Download (102.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
import errno
31
import itertools
32
from cStringIO import StringIO
33

    
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import constants
37
from ganeti import opcodes
38
from ganeti import luxi
39
from ganeti import ssconf
40
from ganeti import rpc
41
from ganeti import ssh
42
from ganeti import compat
43
from ganeti import netutils
44
from ganeti import qlang
45

    
46
from optparse import (OptionParser, TitledHelpFormatter,
47
                      Option, OptionValueError)
48

    
49

    
50
__all__ = [
51
  # Command line options
52
  "ADD_UIDS_OPT",
53
  "ALLOCATABLE_OPT",
54
  "ALLOC_POLICY_OPT",
55
  "ALL_OPT",
56
  "ALLOW_FAILOVER_OPT",
57
  "AUTO_PROMOTE_OPT",
58
  "AUTO_REPLACE_OPT",
59
  "BACKEND_OPT",
60
  "BLK_OS_OPT",
61
  "CAPAB_MASTER_OPT",
62
  "CAPAB_VM_OPT",
63
  "CLEANUP_OPT",
64
  "CLUSTER_DOMAIN_SECRET_OPT",
65
  "CONFIRM_OPT",
66
  "CP_SIZE_OPT",
67
  "DEBUG_OPT",
68
  "DEBUG_SIMERR_OPT",
69
  "DISKIDX_OPT",
70
  "DISK_OPT",
71
  "DISK_TEMPLATE_OPT",
72
  "DRAINED_OPT",
73
  "DRY_RUN_OPT",
74
  "DRBD_HELPER_OPT",
75
  "DST_NODE_OPT",
76
  "EARLY_RELEASE_OPT",
77
  "ENABLED_HV_OPT",
78
  "ERROR_CODES_OPT",
79
  "FIELDS_OPT",
80
  "FILESTORE_DIR_OPT",
81
  "FILESTORE_DRIVER_OPT",
82
  "FORCE_FILTER_OPT",
83
  "FORCE_OPT",
84
  "FORCE_VARIANT_OPT",
85
  "GLOBAL_FILEDIR_OPT",
86
  "HID_OS_OPT",
87
  "GLOBAL_SHARED_FILEDIR_OPT",
88
  "HVLIST_OPT",
89
  "HVOPTS_OPT",
90
  "HYPERVISOR_OPT",
91
  "IALLOCATOR_OPT",
92
  "DEFAULT_IALLOCATOR_OPT",
93
  "IDENTIFY_DEFAULTS_OPT",
94
  "IGNORE_CONSIST_OPT",
95
  "IGNORE_FAILURES_OPT",
96
  "IGNORE_OFFLINE_OPT",
97
  "IGNORE_REMOVE_FAILURES_OPT",
98
  "IGNORE_SECONDARIES_OPT",
99
  "IGNORE_SIZE_OPT",
100
  "INTERVAL_OPT",
101
  "MAC_PREFIX_OPT",
102
  "MAINTAIN_NODE_HEALTH_OPT",
103
  "MASTER_NETDEV_OPT",
104
  "MC_OPT",
105
  "MIGRATION_MODE_OPT",
106
  "NET_OPT",
107
  "NEW_CLUSTER_CERT_OPT",
108
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
109
  "NEW_CONFD_HMAC_KEY_OPT",
110
  "NEW_RAPI_CERT_OPT",
111
  "NEW_SECONDARY_OPT",
112
  "NIC_PARAMS_OPT",
113
  "NODE_FORCE_JOIN_OPT",
114
  "NODE_LIST_OPT",
115
  "NODE_PLACEMENT_OPT",
116
  "NODEGROUP_OPT",
117
  "NODE_PARAMS_OPT",
118
  "NODE_POWERED_OPT",
119
  "NODRBD_STORAGE_OPT",
120
  "NOHDR_OPT",
121
  "NOIPCHECK_OPT",
122
  "NO_INSTALL_OPT",
123
  "NONAMECHECK_OPT",
124
  "NOLVM_STORAGE_OPT",
125
  "NOMODIFY_ETCHOSTS_OPT",
126
  "NOMODIFY_SSH_SETUP_OPT",
127
  "NONICS_OPT",
128
  "NONLIVE_OPT",
129
  "NONPLUS1_OPT",
130
  "NOSHUTDOWN_OPT",
131
  "NOSTART_OPT",
132
  "NOSSH_KEYCHECK_OPT",
133
  "NOVOTING_OPT",
134
  "NO_REMEMBER_OPT",
135
  "NWSYNC_OPT",
136
  "ON_PRIMARY_OPT",
137
  "ON_SECONDARY_OPT",
138
  "OFFLINE_OPT",
139
  "OSPARAMS_OPT",
140
  "OS_OPT",
141
  "OS_SIZE_OPT",
142
  "OOB_TIMEOUT_OPT",
143
  "POWER_DELAY_OPT",
144
  "PREALLOC_WIPE_DISKS_OPT",
145
  "PRIMARY_IP_VERSION_OPT",
146
  "PRIMARY_ONLY_OPT",
147
  "PRIORITY_OPT",
148
  "RAPI_CERT_OPT",
149
  "READD_OPT",
150
  "REBOOT_TYPE_OPT",
151
  "REMOVE_INSTANCE_OPT",
152
  "REMOVE_UIDS_OPT",
153
  "RESERVED_LVS_OPT",
154
  "ROMAN_OPT",
155
  "SECONDARY_IP_OPT",
156
  "SECONDARY_ONLY_OPT",
157
  "SELECT_OS_OPT",
158
  "SEP_OPT",
159
  "SHOWCMD_OPT",
160
  "SHUTDOWN_TIMEOUT_OPT",
161
  "SINGLE_NODE_OPT",
162
  "SRC_DIR_OPT",
163
  "SRC_NODE_OPT",
164
  "SUBMIT_OPT",
165
  "STARTUP_PAUSED_OPT",
166
  "STATIC_OPT",
167
  "SYNC_OPT",
168
  "TAG_ADD_OPT",
169
  "TAG_SRC_OPT",
170
  "TIMEOUT_OPT",
171
  "TO_GROUP_OPT",
172
  "UIDPOOL_OPT",
173
  "USEUNITS_OPT",
174
  "USE_REPL_NET_OPT",
175
  "VERBOSE_OPT",
176
  "VG_NAME_OPT",
177
  "YES_DOIT_OPT",
178
  # Generic functions for CLI programs
179
  "ConfirmOperation",
180
  "GenericMain",
181
  "GenericInstanceCreate",
182
  "GenericList",
183
  "GenericListFields",
184
  "GetClient",
185
  "GetOnlineNodes",
186
  "JobExecutor",
187
  "JobSubmittedException",
188
  "ParseTimespec",
189
  "RunWhileClusterStopped",
190
  "SubmitOpCode",
191
  "SubmitOrSend",
192
  "UsesRPC",
193
  # Formatting functions
194
  "ToStderr", "ToStdout",
195
  "FormatError",
196
  "FormatQueryResult",
197
  "FormatParameterDict",
198
  "GenerateTable",
199
  "AskUser",
200
  "FormatTimestamp",
201
  "FormatLogMessage",
202
  # Tags functions
203
  "ListTags",
204
  "AddTags",
205
  "RemoveTags",
206
  # command line options support infrastructure
207
  "ARGS_MANY_INSTANCES",
208
  "ARGS_MANY_NODES",
209
  "ARGS_MANY_GROUPS",
210
  "ARGS_NONE",
211
  "ARGS_ONE_INSTANCE",
212
  "ARGS_ONE_NODE",
213
  "ARGS_ONE_GROUP",
214
  "ARGS_ONE_OS",
215
  "ArgChoice",
216
  "ArgCommand",
217
  "ArgFile",
218
  "ArgGroup",
219
  "ArgHost",
220
  "ArgInstance",
221
  "ArgJobId",
222
  "ArgNode",
223
  "ArgOs",
224
  "ArgSuggest",
225
  "ArgUnknown",
226
  "OPT_COMPL_INST_ADD_NODES",
227
  "OPT_COMPL_MANY_NODES",
228
  "OPT_COMPL_ONE_IALLOCATOR",
229
  "OPT_COMPL_ONE_INSTANCE",
230
  "OPT_COMPL_ONE_NODE",
231
  "OPT_COMPL_ONE_NODEGROUP",
232
  "OPT_COMPL_ONE_OS",
233
  "cli_option",
234
  "SplitNodeOption",
235
  "CalculateOSNames",
236
  "ParseFields",
237
  "COMMON_CREATE_OPTS",
238
  ]
239

    
240
# Prefix stripped from "no_key" key=val entries to yield {key: False}
NO_PREFIX = "no_"
# Prefix stripped from "-key" key=val entries to yield {key: None}
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25
262

    
263

    
264
class _Argument:
265
  def __init__(self, min=0, max=None): # pylint: disable=W0622
266
    self.min = min
267
    self.max = max
268

    
269
  def __repr__(self):
270
    return ("<%s min=%s max=%s>" %
271
            (self.__class__.__name__, self.min, self.max))
272

    
273

    
274
class ArgSuggest(_Argument):
  """Argument carrying a list of suggested values.

  The choices handed to the constructor serve as suggestions (e.g. for
  completion); any value passed by the user is accepted.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    fields = (self.__class__.__name__, self.min, self.max, self.choices)
    return "<%s min=%s max=%s choices=%r>" % fields
288

    
289

    
290
class ArgChoice(ArgSuggest):
  """Restricted-choice argument.

  Like L{ArgSuggest}, but the value must be one of the choices passed
  to the constructor.

  """
297

    
298

    
299
class ArgUnknown(_Argument):
  """Argument whose kind is not known statically (determined at runtime).

  """
303

    
304

    
305
class ArgInstance(_Argument):
  """Argument naming one or more instances.

  """
309

    
310

    
311
class ArgNode(_Argument):
  """Argument naming one or more nodes.

  """
315

    
316

    
317
class ArgGroup(_Argument):
  """Argument naming one or more node groups.

  """
321

    
322

    
323
class ArgJobId(_Argument):
  """Argument holding a job ID.

  """
327

    
328

    
329
class ArgFile(_Argument):
  """Argument holding a file path.

  """
333

    
334

    
335
class ArgCommand(_Argument):
  """Argument holding a command to run.

  """
339

    
340

    
341
class ArgHost(_Argument):
  """Argument holding a host name.

  """
345

    
346

    
347
class ArgOs(_Argument):
  """Argument naming an OS.

  """
351

    
352

    
353
ARGS_NONE = []
354
ARGS_MANY_INSTANCES = [ArgInstance()]
355
ARGS_MANY_NODES = [ArgNode()]
356
ARGS_MANY_GROUPS = [ArgGroup()]
357
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
358
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
359
# TODO
360
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
361
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
362

    
363

    
364
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter: for node,
  nodegroup and instance tags the first argument is consumed as the
  object name.

  @return: tuple of (tag kind, object name)

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, its own kind doubles as the name
    return kind, kind
  if kind in (constants.TAG_NODEGROUP,
              constants.TAG_NODE,
              constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
385

    
386

    
387
def _ExtendTags(opts, args):
388
  """Extend the args if a source file has been given.
389

390
  This function will extend the tags with the contents of the file
391
  passed in the 'tags_source' attribute of the opts parameter. A file
392
  named '-' will be replaced by stdin.
393

394
  """
395
  fname = opts.tags_source
396
  if fname is None:
397
    return
398
  if fname == "-":
399
    new_fh = sys.stdin
400
  else:
401
    new_fh = open(fname, "r")
402
  new_data = []
403
  try:
404
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
405
    # because of python bug 1633941
406
    while True:
407
      line = new_fh.readline()
408
      if not line:
409
        break
410
      new_data.append(line.strip())
411
  finally:
412
    new_fh.close()
413
  args.extend(new_data)
414

    
415

    
416
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  # sorted() materializes and orders the query result in one step,
  # replacing the manual list()+sort() pair
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
432

    
433

    
434
def AddTags(opts, args):
  """Add tags on a given object.

  Generic implementation covering the three tag object kinds (cluster,
  node, instance); the opts argument must carry a tag_type field
  selecting the object type to operate on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  SubmitOpCode(opcodes.OpTagsSet(kind=kind, name=name, tags=args), opts=opts)
449

    
450

    
451
def RemoveTags(opts, args):
  """Remove tags from a given object.

  Generic implementation covering the three tag object kinds (cluster,
  node, instance); the opts argument must carry a tag_type field
  selecting the object type to operate on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  SubmitOpCode(opcodes.OpTagsDel(kind=kind, name=name, tags=args), opts=opts)
466

    
467

    
468
def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  Delegates to L{utils.ParseUnit}; a parse failure is reported as an
  optparse L{OptionValueError} so the standard error path is used.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
476

    
477

    
478
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if not data:
    return kv_dict
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      key, val = elem.split("=", 1)
    elif elem.startswith(NO_PREFIX):
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      key, val = elem[len(UN_PREFIX):], None
    else:
      key, val = elem, True
    if key in kv_dict:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    kv_dict[key] = val
  return kv_dict
513

    
514

    
515
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" in value:
    ident, rest = value.split(":", 1)
  else:
    ident, rest = value, ""

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    return (ident[len(NO_PREFIX):], False)
  if ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    return (ident[len(UN_PREFIX):], None)
  return (ident, _SplitKeyVal(opt, rest))
541

    
542

    
543
def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @type value: string
  @param value: string of the form key=val,key=val,...
  @rtype: dict

  """
  return _SplitKeyVal(opt, value)
550

    
551

    
552
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value in (constants.VALUE_FALSE, "no"):
    return False
  if value in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
565

    
566

    
567
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: Set of all dynamic-completion markers defined above
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
586

    
587

    
588
class CliOption(Option):
  """Custom option class for optparse.

  Extends the stock optparse Option with a completion_suggest attribute
  and the custom value types used throughout this module.

  """
  # extra per-option attribute used for shell completion
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # custom value types, each backed by a converter registered below
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
606

    
607

    
608
# optparse.py sets make_option, so we do it for our own option class, too
609
cli_option = CliOption
610

    
611

    
612
_YORNO = "yes|no"
613

    
614
# -d is repeatable; each use increments the debug level
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

# suppress the header row in tabular output
NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

# custom field separator for tabular output
SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

    
626
# unit suffix for sizes in listings (human/mebi/gibi/tebi)
USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

# comma-separated selection of output columns
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

# skip the interactive confirmation prompt
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")
639

    
640
# best-effort mode: skip nodes marked offline instead of failing
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

# instance tags given inline on the command line
TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

# tags read from a file; "-" means stdin (see _ExtendTags)
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

# fire-and-forget job submission
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))
661

    
662
# run only the prerequisite checks, not the operation itself;
# help text fixed: "verify it it" -> "verify if it"
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
667

    
668
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

# testing aid: make runtime checks report failure
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

# note the inverted dest: flag clears wait_for_sync (defaults to True)
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")
680

    
681
# disk template choice, restricted to the cluster-known templates
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")
690

    
691
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

# per-operation iallocator selection
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

# cluster-wide default iallocator (used by cluster commands)
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
712

    
713
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

# "keyval" type: parsed into a dict by check_key_val
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")
729

    
730
# "keyval" type: parsed into a dict by check_key_val
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

# NB: -H is shared by the next three options; each command registers
# only the variant it needs
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

# single hypervisor spec, parsed into (ident, {key: val})
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

# repeatable hypervisor specs, accumulated into a list
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")
747

    
748
# inverted dest: flag clears ip_check (defaults to True)
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

# inverted dest: flag clears name_check (defaults to True)
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

# repeatable NIC specs, each parsed as (ident, {key: val})
NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

# repeatable disk specs, each parsed as (ident, {key: val})
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

# "unit" type: size string converted via utils.ParseUnit
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")
774

    
775
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

# inverted dest: flag clears "live" (defaults to True)
NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")
792

    
793
# help text fixed: close the unbalanced parenthesis
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
798

    
799
# NB: -n/--node is shared by several options below; each command
# registers only the variant it needs
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

# exported separately so other code can refer to the long flag name
NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

# inverted dest: flag clears "start" (defaults to True)
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")
829

    
830
# help text fixed: removed doubled space ("and  disrupt") and closed
# the unbalanced parenthesis at the end
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")
838

    
839
STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")
870

    
871
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
872
                               help="Specifies the new node for the instance",
873
                               metavar="NODE", default=None,
874
                               completion_suggest=OPT_COMPL_ONE_NODE)
875

    
876
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
877
                               help="Specifies the new secondary node",
878
                               metavar="NODE", default=None,
879
                               completion_suggest=OPT_COMPL_ONE_NODE)
880

    
881
# Replace disks attached to the primary node (mirrored templates only).
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

# Replace disks attached to the secondary node (mirrored templates only).
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

# Allow promoting nodes to master-candidate status when locking requires it.
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

# Automatic replacement of faulty disks (mirrored templates only).
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

# Skip the configured disk size check, e.g. when it is known to be stale.
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

# Source node for an import-style operation.
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

# Source directory for an import-style operation.
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

# Secondary IP address for a node (node-level "-s", distinct from the
# instance-level "-s"/--on-secondary above).
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

# Re-add a node that was previously part of the cluster.
READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")
# Negative flag: passing it stores False into ssh_key_check.
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

# Tri-state flags below use type="bool" with default=None, meaning
# "leave the flag unchanged" unless the user explicitly passes a value.
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

# Negative flag: passing it stores False into lvm_storage.
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

# "keyval" is a custom option type parsing key=value[,key=value...] pairs.
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

# NOTE: "-C" is also used by MC_OPT above; the two are meant for
# different commands.
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")
# Cluster-wide LVM volume group used for disk allocation.
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

# Confirmation flag for destructive cluster operations; no default is
# given, so optparse leaves it as None unless the flag is passed.
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

# Network device carrying the master IP address.
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

# Cluster-wide directory for file-based instance disks.
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

# Cluster-wide directory for shared file-based instance disks.
GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

# Negative flag: passing it stores False into modify_etc_hosts.
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)
# Negative flag: passing it stores False into modify_ssh_setup.
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

# Machine-parseable output for cluster verify.
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

# Reboot mode; constrained to the values in constants.REBOOT_TYPES.
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

# Negative flag: passing it stores False into shutdown.
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")
# Repeat interval (in seconds) for commands that re-run periodically.
# Fixed user-visible typo in the help text: "repetions" -> "repetitions".
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))
# Release secondary-node locks as soon as they are no longer needed.
EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

# NOTE(review): default=None with action="store_true", unlike the sibling
# flags which default to False; looks inconsistent but may be relied upon
# by callers distinguishing "not given" from "given" -- confirm before
# changing.
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))
# Path to a file holding a replacement cluster domain secret.
# Fixed duplicated word in the user-visible help text ("new new" -> "new").
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

# Tri-state (yes/no/unset): default=None means "leave unchanged".
MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

# When importing an instance, mark parameters equal to the cluster
# defaults as "use default" instead of explicit overrides.
IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

# User-id pool management (comma-separated ids or ranges).
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))
# Logical volume names that cluster verify should not warn about.
# Fixed grammar in the user-visible help text ("volumes names" -> "volume
# names").
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names, that will be"
                                   " ignored by cluster verify"))
ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

# Negative flag: passing it stores False into drbd_storage.
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

# IP version (4 or 6) used for the cluster's primary addresses.
PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

# Opcode priority; restricted to the names in _PRIONAME_TO_VALUE.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

# Tri-state OS flags (yes/no/unset).
HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

# "keyval" is a custom option type parsing key=value[,key=value...] pairs.
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

# Tri-state (yes/no/unset) for the node's source-of-record power state.
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

# Apply the change but do not persist it in the cluster configuration.
NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

# Evacuation scope selectors.
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

# action="append": the option may be given multiple times, accumulating
# destination groups into a list.
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @return: (function, options, args) on success; (None, None, None) when
      the command is missing/unknown or its arguments fail validation

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing: print usage and the sorted command list
    sortedcmds = sorted(commands)

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  # Parse the argument list we were given; previously parse_args() was
  # called without arguments, silently falling back to sys.argv[1:] and
  # ignoring the argv parameter (it only worked because callers pass
  # sys.argv itself, which argv.pop(1) above mutates in place).
  options, args = parser.parse_args(argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  # Running totals over the argument definitions; None means "not yet
  # determined" and is re-evaluated until a definition provides a value
  min_count = None
  max_count = None
  check_max = None

  final_idx = len(args_def) - 1

  for pos, argdef in enumerate(args_def):
    if min_count is None:
      min_count = argdef.min
    elif argdef.min is not None:
      min_count += argdef.min

    if max_count is None:
      max_count = argdef.max
    elif argdef.max is not None:
      max_count += argdef.max

    if pos == final_idx:
      # Only a bounded last argument triggers the exact/upper-bound checks
      check_max = (argdef.max is not None)
    elif argdef.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  given = len(args)

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and given != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and given > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and given < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
def SplitNodeOption(value):
  """Splits the value of a --node option.

  When the value contains a ":", it is split once on that separator and
  the resulting two-element list is returned; otherwise a (value, None)
  tuple is returned.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # no (or empty) variant list: the plain name is the only valid one
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  # a leading "+" means "extend the defaults" rather than "replace them"
  extend = selected.startswith("+")
  fields = (selected[1:] if extend else selected).split(",")

  if extend:
    return default + fields
  return fields
UsesRPC = rpc.RunWithRPC
1466

    
1467

    
1468
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the
      operation')]; note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  # validate the choices structure before touching the terminal
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last choice doubles as the default/fallback answer
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    # wrap each paragraph separately to preserve intended line breaks
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    # no controlling terminal: silently return the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    # mark the default choice, e.g. "y/[n]/?"
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      # read at most 2 bytes: a single answer character plus newline;
      # longer input simply won't match any choice and re-prompts
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # print the per-choice descriptions, then prompt again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
  # intentionally empty: only the exception type and its single
  # argument (the job ID) carry information
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @return: the ID of the submitted job

  """
  if cl is None:
    cl = GetClient()
  return cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for changes on the given job, forwarding any log
  messages to the reporting callbacks, until the job reaches a final
  state, then fetches and interprets the final result.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the job's "opresult" field if the job succeeded
  @raise errors.JobLost: if the job disappears while being polled
  @raise errors.OpExecError: if the job was canceled or any opcode failed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # This must be a plain "if", not an "elif" chained to the log-entry
    # check above: a final status can arrive in the same update as log
    # entries, and an "elif" would then skip the break and poll forever.
    if status in (constants.JOB_STATUS_SUCCESS,
                  constants.JOB_STATUS_ERROR,
                  constants.JOB_STATUS_CANCELING,
                  constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # The job failed; find the first failed opcode and report it, noting
  # whether any earlier opcode had already succeeded (partial failure)
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
1630

    
1631

    
1632
class JobPollCbBase:
  """Abstract data-callback interface used by L{GenericPollJob}.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Must be overridden by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    Must be overridden by subclasses.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
1658

    
1659

    
1660
class JobPollReportCbBase:
  """Abstract reporting-callback interface used by L{GenericPollJob}.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Must be overridden by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    Must be overridden by subclasses.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
1685

    
1686

    
1687
class _LuxiJobPollCb(JobPollCbBase):
  """Data callbacks for L{GenericPollJob}, backed by a luxi client."""

  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all job queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job by delegating to the luxi client.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields, prev_job_info,
                                        prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs, via the client.

    """
    return self.cl.QueryJobs(job_ids, fields)
1708

    
1709

    
1710
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function."""

  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with a (timestamp, log_type,
        log_msg) tuple for every log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Forwards a log message to the feedback function.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    Deliberately a no-op for this reporter.

    """
1732

    
1733

    
1734
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print job progress to standard output/error."""

  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # One-shot flags so each notice below is printed at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints a timestamped, formatted log message to stdout.

    """
    when = time.ctime(utils.MergeTime(timestamp))
    ToStdout("%s %s", when, FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    Emits a one-time notice for the "queued" and "waiting" states.

    """
    if status is None:
      return

    if not self.notified_queued and status == constants.JOB_STATUS_QUEUED:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True
    elif (not self.notified_waitlock
          and status == constants.JOB_STATUS_WAITING):
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
1765

    
1766

    
1767
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Payloads that are not plain ELOG_MESSAGE text are stringified first;
  the result is always passed through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
1775

    
1776

    
1777
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Polls a job until completion and returns its result.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: optional callable receiving log messages; mutually
      exclusive with C{reporter}
  @param reporter: optional L{JobPollReportCbBase} instance

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None:
    # An explicit reporter excludes a feedback function
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback"
                                   " function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1799

    
1800

    
1801
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode and wait for its result.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @return: the result of the single submitted opcode

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                    reporter=reporter)

  return results[0]
1820

    
1821

    
1822
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  Decides, based on the 'opts' parameter, whether to submit and wait for
  the result of the opcode (and return it), or whether to just send the
  job and print its identifier. Used to implement the '--submit' option.

  The opcodes are processed here when only sending via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: in submit-only mode, carrying the job ID

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  ops = [op]
  SetGenericOpcodeOpts(ops, opts)
  raise JobSubmittedException(SendJob(ops, cl=cl))
1841

    
1842

    
1843
def SetGenericOpcodeOpts(opcode_list, options):
  """Applies generic command line options to a list of opcodes.

  Copies the debug level, the dry-run flag and (when set) the priority
  from the parsed options onto each opcode.

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  # The priority lookup key is invariant across the loop
  prio = getattr(options, "priority", None)
  for opcode in opcode_list:
    opcode.debug_level = options.debug
    if hasattr(options, "dry_run"):
      opcode.dry_run = options.dry_run
    if prio is not None:
      opcode.priority = _PRIONAME_TO_VALUE[prio]
1862

    
1863

    
1864
def GetClient():
  """Returns a luxi client connected to the master daemon.

  If no master daemon is reachable, tries to produce a helpful error:
  either the cluster is not initialized here, or this is not the master
  node; otherwise the original luxi error is re-raised.

  """
  # TODO: Cache object?
  try:
    return luxi.Client()
  except luxi.NoMasterError:
    store = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      store.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=store)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
1885

    
1886

    
1887
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @type err: Exception
  @param err: the exception to describe
  @rtype: tuple of (int, string)
  @return: (exit code, error message); exit code is 0 for
      L{JobSubmittedException}, 2 for configuration errors, 1 otherwise

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # NOTE: the order of the isinstance() checks matters; isinstance()
  # also matches subclasses, so the more specific error classes must be
  # tested before broader ones like errors.GenericError below.
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] holds a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # err.args[0] is the hostname that failed to resolve
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two-argument form carries (details, error type)
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
               " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: the job was submitted; report success (exit code 0)
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
1973

    
1974

    
1975
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  Returns the value of the invoked command function (or 1 on parse
  errors), which is used as the process exit code.

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      # sys.argv[1] is the sub-command name; the rest are its arguments
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  # Overrides take precedence over whatever was parsed from the cmdline
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    # Known error classes: format nicely; FormatError also picks the
    # exit code (0 for JobSubmittedException)
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
2044

    
2045

    
2046
def ParseNicOption(optvalue):
2047
  """Parses the value of the --net option(s).
2048

2049
  """
2050
  try:
2051
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2052
  except (TypeError, ValueError), err:
2053
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2054

    
2055
  nics = [{}] * nic_max
2056
  for nidx, ndict in optvalue:
2057
    nidx = int(nidx)
2058

    
2059
    if not isinstance(ndict, dict):
2060
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2061
                                 " got %s" % (nidx, ndict))
2062

    
2063
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2064

    
2065
    nics[nidx] = ndict
2066

    
2067
  return nics
2068

    
2069

    
2070
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code
  @raise errors.OpPrereqError: on inconsistent disk/NIC options
  @raise errors.ProgrammerError: if C{mode} is neither create nor import

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    # opts.hypervisor is a (name, params-dict) pair
    hypervisor, hvparams = opts.hypervisor

  # Determine the NIC list: explicit --net options win, then --no-nics,
  # then a single all-default NIC for creation (imports get none)
  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      # Translate the legacy "-s <size>" option into a single disk 0
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      # NOTE(review): "[{}] * disk_max" aliases one shared dict across all
      # slots not overwritten below — harmless only while nobody mutates
      # an unset slot; consider a list comprehension here
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        # "size" and "adopt" are mutually exclusive per disk
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        # Size of adopted disks is determined later; use 0 as placeholder
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # Mode-specific opcode parameters: creation honors start/OS options,
  # import takes its data from the source node/directory instead
  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0
2204

    
2205

    
2206
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; used for the restart order in
    # L{Call} (master is started last)
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; None or the master node name runs the
        command locally, anything else runs it via SSH as root
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command fails

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    Blocks the watcher, stops all Ganeti daemons cluster-wide, runs
    C{fn(self, *args)}, then restarts the daemons (master last) and
    releases the watcher lock, regardless of whether C{fn} raised.

    @type fn: callable
    @param fn: Function to be called
    @return: the return value of C{fn}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          # Report the error before re-raising so the caller's feedback
          # channel sees it even if the restart below also fails
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
2299

    
2300

    
2301
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: whatever C{fn} returns

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  client = GetClient()

  (cluster_name, master_node) = \
    client.QueryConfigValues(["cluster_name", "master_node"])
  online_nodes = GetOnlineNodes([], cl=client)

  # Don't keep a reference to the client. The master daemon will go away.
  del client

  assert master_node in online_nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, online_nodes)
  return helper.Call(fn, *args)
2327

    
2328

    
2329
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output;
      note that the rows are modified in place (values are converted to
      strings, unit fields reformatted)
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines (header first, if any)

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # Build one %-format placeholder per column: plain "%s" when a separator
  # is given, otherwise width-parameterized, right- or left-aligned
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens tracks the maximum width seen per column
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    # Escape "%" in the separator itself so the final %-formatting is safe
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: stringify all values in place, apply unit formatting,
  # and collect column widths (separator-less mode only)
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # Headers can widen a column too
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # Don't pad the last column unless it's right-aligned (avoids
    # trailing whitespace)
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  # Second pass: render each data row (None rows become all dashes)
  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2435

    
2436

    
2437
def _FormatBool(value):
2438
  """Formats a boolean value as a string.
2439

2440
  """
2441
  if value:
2442
    return "Y"
2443
  return "N"
2444

    
2445

    
2446
#: Default formatting for query results; maps each query field type to a
#: tuple of (formatting callback, align right); L{constants.QFT_UNIT} is
#: deliberately absent, it is handled dynamically in L{_GetColumnFormatter}
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
2455

    
2456

    
2457
def _GetColumnFormatter(fdef, override, unit):
  """Selects the formatting function for one query field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: the one-argument function formatting a value and whether the
    value should be aligned on the right-hand side
  @raise NotImplementedError: if the field type has no known formatter

  """
  custom = override.get(fdef.name, None)
  if custom is not None:
    return custom

  # Unit formatting can't live in the static table as it depends on "unit"
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    return (lambda value: utils.FormatUnit(value, unit), True)

  default = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if default is not None:
    return default

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2486

    
2487

    
2488
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: function formatting a plain value
    @type status_fn: callable
    @param status_fn: callback recording each field's result status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._format = fn
    self._record_status = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    @param data: tuple of (result status, value)

    """
    (status, value) = data

    # Always let the caller know what status was seen
    self._record_status(status)

    if status != constants.RS_NORMAL:
      # Abnormal results never carry a value
      assert value is None, \
             "Found value %r for abnormal status %s" % (value, status)
      return FormatResultError(status, self._verbose)

    return self._format(value)
2523

    
2524

    
2525
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status
  @raise NotImplementedError: for status values without a description

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)

  if verbose:
    text = verbose_text
  else:
    text = normal_text
  return text
2544

    
2545

    
2546
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}; chosen automatically when C{None}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @return: tuple of overall status (one of C{QR_*}) and the table lines

  """
  if unit is None:
    # Human-readable sizes interactively, megabytes for parseable output
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, filled in by the column formatters below
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    formatter = _QueryColumnFormatter(fn, _RecordStatus, verbose)
    columns.append(TableColumn(fdef.title, formatter, align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
2608

    
2609

    
2610
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list
  @return: all field definitions whose type is L{constants.QFT_UNKNOWN}

  """
  return [f for f in fdefs if f.kind == constants.QFT_UNKNOWN]
2618

    
2619

    
2620
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown field was queried for

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
2633

    
2634

    
2635
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for; all items if empty
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @param cl: luxi client to use, a new one is created when C{None}
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: int
  @return: an exit code suitable for C{sys.exit}

  """
  if cl is None:
    cl = GetClient()

  # An empty name list means "list everything"
  filter_ = qlang.MakeFilter(names or None, force_filter)

  response = cl.Query(resource, fields, filter_)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  # Unknown fields must be reflected in the overall status
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
2689

    
2690

    
2691
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for; all fields if empty
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @param cl: luxi client to use, a new one is created when C{None}
  @rtype: int
  @return: an exit code suitable for C{sys.exit}

  """
  if cl is None:
    cl = GetClient()

  # An empty field list means "all fields"
  response = cl.QueryFields(resource, fields or None)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2728

    
2729

    
2730
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: text shown in the header row
    @type fn: callable
    @param fn: function formatting a cell value
    @type align_right: bool
    @param align_right: whether cell values are aligned on the right

    """
    self.align_right = align_right
    self.format = fn
    self.title = title
2748

    
2749

    
2750
def _GetColFormatString(width, align_right):
2751
  """Returns the format string for a field.
2752

2753
  """
2754
  if align_right:
2755
    sign = ""
2756
  else:
2757
    sign = "-"
2758

    
2759
  return "%%%s%ss" % (sign, width)
2760

    
2761

    
2762
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted table lines

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0] * len(columns)

  # Format all cell values, tracking the widest cell of every column
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      colwidth = [max(oldwidth, len(value))
                  for oldwidth, value in zip(colwidth, formatted)]

    data.append(formatted)

  if separator is not None:
    # With an explicit separator no padding is necessary
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join(_GetColFormatString(width, col.align_right)
                 for col, width in zip(columns, colwidth))

  return [fmt % tuple(row) for row in data]
2809

    
2810

    
2811
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" for malformed input

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usec) = ts
  return "%s.%06d" % (time.strftime("%F %T", time.localtime(sec)), usec)
2825

    
2826

    
2827
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @param value: the time specification, e.g. C{"30"} or C{"2h"}
  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: for empty or unparseable input

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    digits = value[:-1]
    if not digits: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(digits) * multiplier
    except (TypeError, ValueError):
      # Report the user's full input, not just the stripped digits (the old
      # code re-used "value" and therefore showed e.g. "2x" for input "2xh")
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
2867

    
2868

    
2869
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  # Collect the individual filter conditions
  conditions = []

  if nodes:
    conditions.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # Accept either the group name or its UUID
    conditions.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    conditions.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  # Combine the conditions into a single filter
  if not conditions:
    final_filter = None
  elif len(conditions) == 1:
    final_filter = conditions[0]
  else:
    final_filter = [qlang.OP_AND] + conditions

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
2945

    
2946

    
2947
def _ToStream(stream, txt, *args):
2948
  """Write a message to a stream, bypassing the logging system
2949

2950
  @type stream: file object
2951
  @param stream: the file to which we should write
2952
  @type txt: str
2953
  @param txt: the message
2954

2955
  """
2956
  try:
2957
    if args:
2958
      args = tuple(args)
2959
      stream.write(txt % args)
2960
    else:
2961
      stream.write(txt)
2962
    stream.write("\n")
2963
    stream.flush()
2964
  except IOError, err:
2965
    if err.errno == errno.EPIPE:
2966
      # our terminal went away, we'll exit
2967
      sys.exit(constants.EXIT_FAILURE)
2968
    else:
2969
      raise
2970

    
2971

    
2972
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional "%"-interpolation arguments for C{txt}

  """
  _ToStream(sys.stdout, txt, *args)
2982

    
2983

    
2984
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional "%"-interpolation arguments for C{txt}

  """
  _ToStream(sys.stderr, txt, *args)
2994

    
2995

    
2996
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: luxi client to use, a new one is created if None
    @type verbose: boolean
    @param verbose: whether to print the IDs of successfully submitted jobs
    @param opts: parsed command line options, passed on to queued opcodes
    @param feedback_fn: function used to report job progress while polling

    """
    # Jobs not yet submitted, as (index, name, opcode list) tuples
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # Submitted jobs, as (index, submit status, job ID or error, name) tuples
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Monotonic counter; the indices restore submission order in GetResults()
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    @type name: string
    @param name: the name to format, may be empty
    @type fmt: string
    @param fmt: "%"-style format string applied when C{name} is non-empty
    @return: the formatted name, or the empty string for an empty name

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    @param ops: the opcodes making up the job

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    @param name: a description of the job
    @param status: the submit status of the job
    @param job_id: the ID of the already-submitted job

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit the jobs one by one instead of using a
        single SubmitManyJobs call

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    # Pair each submission result with its queue entry, keeping the index so
    # GetResults() can restore the original submission order
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    The chosen entry is removed from L{self.jobs} and returned.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # Only query a bounded number of jobs at a time
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      # for failed submissions "jid" holds the error message, not a job ID
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not
    @return: job results as per L{GetResults} when waiting, otherwise
        (status, job ID or error) pairs for all submitted jobs

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
3146

    
3147

    
3148
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = level * "  "
  for key in sorted(actual):
    if key in param_dict:
      val = param_dict[key]
    else:
      # Parameter not set explicitly, show the inherited/default value
      val = "default (%s)" % actual[key]
    buf.write("%s- %s: %s\n" % (indent, key, val))
3164

    
3165

    
3166
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  prompt = ("The %s will operate on %d %s.\n%s"
            "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count <= 20:
    # Short lists are shown right away
    question = prompt + affected
  else:
    # Long lists are only shown on demand, via the extra "v" choice
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = prompt

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(prompt + affected, choices)
  return choice