#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
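
# Note (illustrative, not part of the original source): each ARGS_* value is a
# list of argument definitions that _CheckArguments() below enforces, e.g.
# ARGS_ONE_INSTANCE requires exactly one instance name (min=1, max=1), while
# ARGS_MANY_NODES accepts any number of node names (min=0, max=None).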


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
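
# Illustrative example (not part of the original source): with the --from
# option (TAG_SRC_OPT) pointing at a file that lists one tag per line, those
# tags are appended to the positional arguments; passing "--from -" reads the
# tag list from stdin instead.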


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
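
# Illustrative example (not part of the original source): given the rules
# above, _SplitKeyVal("-H", "kernel_path=/vmlinuz,no_acpi,-initrd_path")
# returns {"kernel_path": "/vmlinuz", "acpi": False, "initrd_path": None}.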


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
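
# Illustrative example (not part of the original source):
#   check_ident_key_val(None, "-H", "xen-pvm:kernel_path=/vmlinuz,no_acpi")
#     -> ("xen-pvm", {"kernel_path": "/vmlinuz", "acpi": False})
# while a bare "no_<ident>" or "-<ident>" value yields (<ident>, False) or
# (<ident>, None) to request removal/reset of a whole parameter group.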


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
                         default={}, dest="hvparams",
                         help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names, that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
1276
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
1277

1278
  """
1279
  if len(argv) == 0:
1280
    binary = "<command>"
1281
  else:
1282
    binary = argv[0].split("/")[-1]
1283

    
1284
  if len(argv) > 1 and argv[1] == "--version":
1285
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1286
             constants.RELEASE_VERSION)
1287
    # Quit right away. That way we don't have to care about this special
1288
    # argument. optparse.py does it the same.
1289
    sys.exit(0)
1290

    
1291
  if len(argv) < 2 or not (argv[1] in commands or
1292
                           argv[1] in aliases):
1293
    # let's do a nice thing
1294
    sortedcmds = commands.keys()
1295
    sortedcmds.sort()
1296

    
1297
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1298
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1299
    ToStdout("")
1300

    
1301
    # compute the max line length for cmd + usage
1302
    mlen = max([len(" %s" % cmd) for cmd in commands])
1303
    mlen = min(60, mlen) # should not get here...
1304

    
1305
    # and format a nice command list
1306
    ToStdout("Commands:")
1307
    for cmd in sortedcmds:
1308
      cmdstr = " %s" % (cmd,)
1309
      help_text = commands[cmd][4]
1310
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1311
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1312
      for line in help_lines:
1313
        ToStdout("%-*s   %s", mlen, "", line)
1314

    
1315
    ToStdout("")
1316

    
1317
    return None, None, None
1318

    
1319
  # get command, unalias it, and look it up in commands
1320
  cmd = argv.pop(1)
1321
  if cmd in aliases:
1322
    if cmd in commands:
1323
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
1324
                                   " command" % cmd)
1325

    
1326
    if aliases[cmd] not in commands:
1327
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1328
                                   " command '%s'" % (cmd, aliases[cmd]))
1329

    
1330
    cmd = aliases[cmd]
1331

    
1332
  func, args_def, parser_opts, usage, description = commands[cmd]
1333
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1334
                        description=description,
1335
                        formatter=TitledHelpFormatter(),
1336
                        usage="%%prog %s %s" % (cmd, usage))
1337
  parser.disable_interspersed_args()
1338
  options, args = parser.parse_args()
1339

    
1340
  if not _CheckArguments(cmd, args_def, args):
1341
    return None, None, None
1342

    
1343
  return func, options, args
1344

    
1345

    
1346
def _CheckArguments(cmd, args_def, args):
1347
  """Verifies the arguments using the argument definition.
1348

1349
  Algorithm:
1350

1351
    1. Abort with error if values specified by user but none expected.
1352

1353
    1. For each argument in definition
1354

1355
      1. Keep running count of minimum number of values (min_count)
1356
      1. Keep running count of maximum number of values (max_count)
1357
      1. If it has an unlimited number of values
1358

1359
        1. Abort with error if it's not the last argument in the definition
1360

1361
    1. If last argument has limited number of values
1362

1363
      1. Abort with error if number of values doesn't match or is too large
1364

1365
    1. Abort with error if user didn't pass enough values (min_count)
1366

1367
  """
1368
  if args and not args_def:
1369
    ToStderr("Error: Command %s expects no arguments", cmd)
1370
    return False
1371

    
1372
  min_count = None
1373
  max_count = None
1374
  check_max = None
1375

    
1376
  last_idx = len(args_def) - 1
1377

    
1378
  for idx, arg in enumerate(args_def):
1379
    if min_count is None:
1380
      min_count = arg.min
1381
    elif arg.min is not None:
1382
      min_count += arg.min
1383

    
1384
    if max_count is None:
1385
      max_count = arg.max
1386
    elif arg.max is not None:
1387
      max_count += arg.max
1388

    
1389
    if idx == last_idx:
1390
      check_max = (arg.max is not None)
1391

    
1392
    elif arg.max is None:
1393
      raise errors.ProgrammerError("Only the last argument can have max=None")
1394

    
1395
  if check_max:
1396
    # Command with exact number of arguments
1397
    if (min_count is not None and max_count is not None and
1398
        min_count == max_count and len(args) != min_count):
1399
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1400
      return False
1401

    
1402
    # Command with limited number of arguments
1403
    if max_count is not None and len(args) > max_count:
1404
      ToStderr("Error: Command %s expects only %d argument(s)",
1405
               cmd, max_count)
1406
      return False
1407

    
1408
  # Command with some required arguments
1409
  if min_count is not None and len(args) < min_count:
1410
    ToStderr("Error: Command %s expects at least %d argument(s)",
1411
             cmd, min_count)
1412
    return False
1413

    
1414
  return True
1415

    
1416

    
1417
def SplitNodeOption(value):
1418
  """Splits the value of a --node option.
1419

1420
  """
1421
  if value and ":" in value:
1422
    return value.split(":", 1)
1423
  else:
1424
    return (value, None)
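
# Illustrative usage (comment only, not part of the original module):
# SplitNodeOption splits the "pnode:snode" form of the --node option; note
# that the colon branch returns a list while the other returns a tuple:
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)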
1425

    
1426

    
1427
def CalculateOSNames(os_name, os_variants):
1428
  """Calculates all the names an OS can be called, according to its variants.
1429

1430
  @type os_name: string
1431
  @param os_name: base name of the os
1432
  @type os_variants: list or None
1433
  @param os_variants: list of supported variants
1434
  @rtype: list
1435
  @return: list of valid names
1436

1437
  """
1438
  if os_variants:
1439
    return ["%s+%s" % (os_name, v) for v in os_variants]
1440
  else:
1441
    return [os_name]
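
# Illustrative usage (comment only): with variants an OS can be referred to
# as "<os>+<variant>", without variants only by its base name ("debootstrap"
# is just an example OS name here):
#   CalculateOSNames("debootstrap", ["wheezy", "jessie"])
#     -> ["debootstrap+wheezy", "debootstrap+jessie"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]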
1442

    
1443

    
1444
def ParseFields(selected, default):
1445
  """Parses the values of "--field"-like options.
1446

1447
  @type selected: string or None
1448
  @param selected: User-selected options
1449
  @type default: list
1450
  @param default: Default fields
1451

1452
  """
1453
  if selected is None:
1454
    return default
1455

    
1456
  if selected.startswith("+"):
1457
    return default + selected[1:].split(",")
1458

    
1459
  return selected.split(",")
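
# Illustrative usage (comment only), assuming a default field list of
# ["name", "status"]:
#   ParseFields(None, ["name", "status"])          -> ["name", "status"]
#   ParseFields("+oper_state", ["name", "status"])
#     -> ["name", "status", "oper_state"]
#   ParseFields("name,os", ["name", "status"])     -> ["name", "os"]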
1460

    
1461

    
1462
UsesRPC = rpc.RunWithRPC
1463

    
1464

    
1465
def AskUser(text, choices=None):
1466
  """Ask the user a question.
1467

1468
  @param text: the question to ask
1469

1470
  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list
1478

1479
  """
1480
  if choices is None:
1481
    choices = [("y", True, "Perform the operation"),
1482
               ("n", False, "Do not perform the operation")]
1483
  if not choices or not isinstance(choices, list):
1484
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1485
  for entry in choices:
1486
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1487
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1488

    
1489
  answer = choices[-1][1]
1490
  new_text = []
1491
  for line in text.splitlines():
1492
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1493
  text = "\n".join(new_text)
1494
  try:
1495
    f = file("/dev/tty", "a+")
1496
  except IOError:
1497
    return answer
1498
  try:
1499
    chars = [entry[0] for entry in choices]
1500
    chars[-1] = "[%s]" % chars[-1]
1501
    chars.append("?")
1502
    maps = dict([(entry[0], entry[1]) for entry in choices])
1503
    while True:
1504
      f.write(text)
1505
      f.write("\n")
1506
      f.write("/".join(chars))
1507
      f.write(": ")
1508
      line = f.readline(2).strip().lower()
1509
      if line in maps:
1510
        answer = maps[line]
1511
        break
1512
      elif line == "?":
1513
        for entry in choices:
1514
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1515
        f.write("\n")
1516
        continue
1517
  finally:
1518
    f.close()
1519
  return answer
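
# Illustrative usage (comment only): the return values are arbitrary and
# chosen by the caller, and the last choice is the default when no tty is
# available:
#   AskUser("Destroy all instances?",
#           choices=[("y", True, "Destroy them"),
#                    ("n", False, "Keep them"),
#                    ("a", "abort", "Abort the whole operation")])
#   -> True, False or "abort"; "abort" is also returned without a tty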
1520

    
1521

    
1522
class JobSubmittedException(Exception):
1523
  """Job was submitted, client should exit.
1524

1525
  This exception has one argument, the ID of the job that was
1526
  submitted. The handler should print this ID.
1527

1528
  This is not an error, just a structured way to exit from clients.
1529

1530
  """
1531

    
1532

    
1533
def SendJob(ops, cl=None):
1534
  """Function to submit an opcode without waiting for the results.
1535

1536
  @type ops: list
1537
  @param ops: list of opcodes
1538
  @type cl: luxi.Client
1539
  @param cl: the luxi client to use for communicating with the master;
1540
             if None, a new client will be created
1541

1542
  """
1543
  if cl is None:
1544
    cl = GetClient()
1545

    
1546
  job_id = cl.SubmitJob(ops)
1547

    
1548
  return job_id
1549

    
1550

    
1551
def GenericPollJob(job_id, cbs, report_cbs):
1552
  """Generic job-polling function.
1553

1554
  @type job_id: number
1555
  @param job_id: Job ID
1556
  @type cbs: Instance of L{JobPollCbBase}
1557
  @param cbs: Data callbacks
1558
  @type report_cbs: Instance of L{JobPollReportCbBase}
1559
  @param report_cbs: Reporting callbacks
1560

1561
  """
1562
  prev_job_info = None
1563
  prev_logmsg_serial = None
1564

    
1565
  status = None
1566

    
1567
  while True:
1568
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1569
                                      prev_logmsg_serial)
1570
    if not result:
1571
      # job not found, go away!
1572
      raise errors.JobLost("Job with id %s lost" % job_id)
1573

    
1574
    if result == constants.JOB_NOTCHANGED:
1575
      report_cbs.ReportNotChanged(job_id, status)
1576

    
1577
      # Wait again
1578
      continue
1579

    
1580
    # Split result, a tuple of (field values, log entries)
1581
    (job_info, log_entries) = result
1582
    (status, ) = job_info
1583

    
1584
    if log_entries:
1585
      for log_entry in log_entries:
1586
        (serial, timestamp, log_type, message) = log_entry
1587
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1588
                                    log_type, message)
1589
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1590

    
1591
    # TODO: Handle canceled and archived jobs
1592
    elif status in (constants.JOB_STATUS_SUCCESS,
1593
                    constants.JOB_STATUS_ERROR,
1594
                    constants.JOB_STATUS_CANCELING,
1595
                    constants.JOB_STATUS_CANCELED):
1596
      break
1597

    
1598
    prev_job_info = job_info
1599

    
1600
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1601
  if not jobs:
1602
    raise errors.JobLost("Job with id %s lost" % job_id)
1603

    
1604
  status, opstatus, result = jobs[0]
1605

    
1606
  if status == constants.JOB_STATUS_SUCCESS:
1607
    return result
1608

    
1609
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1610
    raise errors.OpExecError("Job was canceled")
1611

    
1612
  has_ok = False
1613
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1614
    if status == constants.OP_STATUS_SUCCESS:
1615
      has_ok = True
1616
    elif status == constants.OP_STATUS_ERROR:
1617
      errors.MaybeRaise(msg)
1618

    
1619
      if has_ok:
1620
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1621
                                 (idx, msg))
1622

    
1623
      raise errors.OpExecError(str(msg))
1624

    
1625
  # default failure mode
1626
  raise errors.OpExecError(result)
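
# Illustrative usage (comment only): GenericPollJob only needs the two
# callback objects; the LUXI-based implementations defined further down in
# this module (and wrapped by PollJob) are the usual choice. "op" stands for
# any opcode instance built by the caller:
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = GenericPollJob(job_id, _LuxiJobPollCb(cl),
#                            StdioJobPollReportCb())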
1627

    
1628

    
1629
class JobPollCbBase:
1630
  """Base class for L{GenericPollJob} callbacks.
1631

1632
  """
1633
  def __init__(self):
1634
    """Initializes this class.
1635

1636
    """
1637

    
1638
  def WaitForJobChangeOnce(self, job_id, fields,
1639
                           prev_job_info, prev_log_serial):
1640
    """Waits for changes on a job.
1641

1642
    """
1643
    raise NotImplementedError()
1644

    
1645
  def QueryJobs(self, job_ids, fields):
1646
    """Returns the selected fields for the selected job IDs.
1647

1648
    @type job_ids: list of numbers
1649
    @param job_ids: Job IDs
1650
    @type fields: list of strings
1651
    @param fields: Fields
1652

1653
    """
1654
    raise NotImplementedError()
1655

    
1656

    
1657
class JobPollReportCbBase:
1658
  """Base class for L{GenericPollJob} reporting callbacks.
1659

1660
  """
1661
  def __init__(self):
1662
    """Initializes this class.
1663

1664
    """
1665

    
1666
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1667
    """Handles a log message.
1668

1669
    """
1670
    raise NotImplementedError()
1671

    
1672
  def ReportNotChanged(self, job_id, status):
1673
    """Called for if a job hasn't changed in a while.
1674

1675
    @type job_id: number
1676
    @param job_id: Job ID
1677
    @type status: string or None
1678
    @param status: Job status if available
1679

1680
    """
1681
    raise NotImplementedError()
1682

    
1683

    
1684
class _LuxiJobPollCb(JobPollCbBase):
1685
  def __init__(self, cl):
1686
    """Initializes this class.
1687

1688
    """
1689
    JobPollCbBase.__init__(self)
1690
    self.cl = cl
1691

    
1692
  def WaitForJobChangeOnce(self, job_id, fields,
1693
                           prev_job_info, prev_log_serial):
1694
    """Waits for changes on a job.
1695

1696
    """
1697
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1698
                                        prev_job_info, prev_log_serial)
1699

    
1700
  def QueryJobs(self, job_ids, fields):
1701
    """Returns the selected fields for the selected job IDs.
1702

1703
    """
1704
    return self.cl.QueryJobs(job_ids, fields)
1705

    
1706

    
1707
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1708
  def __init__(self, feedback_fn):
1709
    """Initializes this class.
1710

1711
    """
1712
    JobPollReportCbBase.__init__(self)
1713

    
1714
    self.feedback_fn = feedback_fn
1715

    
1716
    assert callable(feedback_fn)
1717

    
1718
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1719
    """Handles a log message.
1720

1721
    """
1722
    self.feedback_fn((timestamp, log_type, log_msg))
1723

    
1724
  def ReportNotChanged(self, job_id, status):
1725
    """Called if a job hasn't changed in a while.
1726

1727
    """
1728
    # Ignore
1729

    
1730

    
1731
class StdioJobPollReportCb(JobPollReportCbBase):
1732
  def __init__(self):
1733
    """Initializes this class.
1734

1735
    """
1736
    JobPollReportCbBase.__init__(self)
1737

    
1738
    self.notified_queued = False
1739
    self.notified_waitlock = False
1740

    
1741
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1742
    """Handles a log message.
1743

1744
    """
1745
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1746
             FormatLogMessage(log_type, log_msg))
1747

    
1748
  def ReportNotChanged(self, job_id, status):
1749
    """Called if a job hasn't changed in a while.
1750

1751
    """
1752
    if status is None:
1753
      return
1754

    
1755
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1756
      ToStderr("Job %s is waiting in queue", job_id)
1757
      self.notified_queued = True
1758

    
1759
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1760
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1761
      self.notified_waitlock = True
1762

    
1763

    
1764
def FormatLogMessage(log_type, log_msg):
1765
  """Formats a job message according to its type.
1766

1767
  """
1768
  if log_type != constants.ELOG_MESSAGE:
1769
    log_msg = str(log_msg)
1770

    
1771
  return utils.SafeEncode(log_msg)
1772

    
1773

    
1774
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1775
  """Function to poll for the result of a job.
1776

1777
  @type job_id: job identifier
1778
  @param job_id: the job to poll for results
1779
  @type cl: luxi.Client
1780
  @param cl: the luxi client to use for communicating with the master;
1781
             if None, a new client will be created
1782

1783
  """
1784
  if cl is None:
1785
    cl = GetClient()
1786

    
1787
  if reporter is None:
1788
    if feedback_fn:
1789
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1790
    else:
1791
      reporter = StdioJobPollReportCb()
1792
  elif feedback_fn:
1793
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1794

    
1795
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1796

    
1797

    
1798
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1799
  """Legacy function to submit an opcode.
1800

1801
  This is just a simple wrapper over the construction of the processor
1802
  instance. It should be extended to better handle feedback and
1803
  interaction functions.
1804

1805
  """
1806
  if cl is None:
1807
    cl = GetClient()
1808

    
1809
  SetGenericOpcodeOpts([op], opts)
1810

    
1811
  job_id = SendJob([op], cl=cl)
1812

    
1813
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1814
                       reporter=reporter)
1815

    
1816
  return op_results[0]
1817

    
1818

    
1819
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1820
  """Wrapper around SubmitOpCode or SendJob.
1821

1822
  This function will decide, based on the 'opts' parameter, whether to
1823
  submit and wait for the result of the opcode (and return it), or
1824
  whether to just send the job and print its identifier. It is used in
1825
  order to simplify the implementation of the '--submit' option.
1826

1827
  It will also process the opcodes if we're sending them via SendJob
1828
  (otherwise SubmitOpCode does it).
1829

1830
  """
1831
  if opts and opts.submit_only:
1832
    job = [op]
1833
    SetGenericOpcodeOpts(job, opts)
1834
    job_id = SendJob(job, cl=cl)
1835
    raise JobSubmittedException(job_id)
1836
  else:
1837
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
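
# Illustrative usage (comment only): "opts" is the parsed command line; for
# commands declared with SUBMIT_OPT, passing --submit makes this raise
# JobSubmittedException(job_id) instead of waiting for the result:
#   try:
#     result = SubmitOrSend(op, opts)
#   except JobSubmittedException, err:
#     ToStdout("Job %s submitted", err.args[0])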
1838

    
1839

    
1840
def SetGenericOpcodeOpts(opcode_list, options):
1841
  """Processor for generic options.
1842

1843
  This function updates the given opcodes based on generic command
1844
  line options (like debug, dry-run, etc.).
1845

1846
  @param opcode_list: list of opcodes
1847
  @param options: command line options or None
1848
  @return: None (in-place modification)
1849

1850
  """
1851
  if not options:
1852
    return
1853
  for op in opcode_list:
1854
    op.debug_level = options.debug
1855
    if hasattr(options, "dry_run"):
1856
      op.dry_run = options.dry_run
1857
    if getattr(options, "priority", None) is not None:
1858
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1859

    
1860

    
1861
def GetClient():
1862
  # TODO: Cache object?
1863
  try:
1864
    client = luxi.Client()
1865
  except luxi.NoMasterError:
1866
    ss = ssconf.SimpleStore()
1867

    
1868
    # Try to read ssconf file
1869
    try:
1870
      ss.GetMasterNode()
1871
    except errors.ConfigurationError:
1872
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1873
                                 " not part of a cluster")
1874

    
1875
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1876
    if master != myself:
1877
      raise errors.OpPrereqError("This is not the master node, please connect"
1878
                                 " to node '%s' and rerun the command" %
1879
                                 master)
1880
    raise
1881
  return client
1882

    
1883

    
1884
def FormatError(err):
1885
  """Return a formatted error message for a given error.
1886

1887
  This function takes an exception instance and returns a tuple
1888
  consisting of two values: first, the recommended exit code, and
1889
  second, a string describing the error message (not
1890
  newline-terminated).
1891

1892
  """
1893
  retcode = 1
1894
  obuf = StringIO()
1895
  msg = str(err)
1896
  if isinstance(err, errors.ConfigurationError):
1897
    txt = "Corrupt configuration file: %s" % msg
1898
    logging.error(txt)
1899
    obuf.write(txt + "\n")
1900
    obuf.write("Aborting.")
1901
    retcode = 2
1902
  elif isinstance(err, errors.HooksAbort):
1903
    obuf.write("Failure: hooks execution failed:\n")
1904
    for node, script, out in err.args[0]:
1905
      if out:
1906
        obuf.write("  node: %s, script: %s, output: %s\n" %
1907
                   (node, script, out))
1908
      else:
1909
        obuf.write("  node: %s, script: %s (no output)\n" %
1910
                   (node, script))
1911
  elif isinstance(err, errors.HooksFailure):
1912
    obuf.write("Failure: hooks general failure: %s" % msg)
1913
  elif isinstance(err, errors.ResolverError):
1914
    this_host = netutils.Hostname.GetSysName()
1915
    if err.args[0] == this_host:
1916
      msg = "Failure: can't resolve my own hostname ('%s')"
1917
    else:
1918
      msg = "Failure: can't resolve hostname '%s'"
1919
    obuf.write(msg % err.args[0])
1920
  elif isinstance(err, errors.OpPrereqError):
1921
    if len(err.args) == 2:
1922
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
1925
    else:
1926
      obuf.write("Failure: prerequisites not met for this"
1927
                 " operation:\n%s" % msg)
1928
  elif isinstance(err, errors.OpExecError):
1929
    obuf.write("Failure: command execution error:\n%s" % msg)
1930
  elif isinstance(err, errors.TagError):
1931
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1932
  elif isinstance(err, errors.JobQueueDrainError):
1933
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1934
               " accept new requests\n")
1935
  elif isinstance(err, errors.JobQueueFull):
1936
    obuf.write("Failure: the job queue is full and doesn't accept new"
1937
               " job submissions until old jobs are archived\n")
1938
  elif isinstance(err, errors.TypeEnforcementError):
1939
    obuf.write("Parameter Error: %s" % msg)
1940
  elif isinstance(err, errors.ParameterError):
1941
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1942
  elif isinstance(err, luxi.NoMasterError):
1943
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1944
               " and listening for connections?")
1945
  elif isinstance(err, luxi.TimeoutError):
1946
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1947
               " been submitted and will continue to run even if the call"
1948
               " timed out. Useful commands in this situation are \"gnt-job"
1949
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1950
    obuf.write(msg)
1951
  elif isinstance(err, luxi.PermissionError):
1952
    obuf.write("It seems you don't have permissions to connect to the"
1953
               " master daemon.\nPlease retry as a different user.")
1954
  elif isinstance(err, luxi.ProtocolError):
1955
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1956
               "%s" % msg)
1957
  elif isinstance(err, errors.JobLost):
1958
    obuf.write("Error checking job status: %s" % msg)
1959
  elif isinstance(err, errors.QueryFilterParseError):
1960
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
1961
    obuf.write("\n".join(err.GetDetails()))
1962
  elif isinstance(err, errors.GenericError):
1963
    obuf.write("Unhandled Ganeti error: %s" % msg)
1964
  elif isinstance(err, JobSubmittedException):
1965
    obuf.write("JobID: %s\n" % err.args[0])
1966
    retcode = 0
1967
  else:
1968
    obuf.write("Unhandled exception: %s" % msg)
1969
  return retcode, obuf.getvalue().rstrip("\n")
1970

    
1971

    
1972
def GenericMain(commands, override=None, aliases=None):
1973
  """Generic main function for all the gnt-* commands.
1974

1975
  Arguments:
1976
    - commands: a dictionary with a special structure, see the design doc
1977
                for command line handling.
1978
    - override: if not None, we expect a dictionary with keys that will
1979
                override command line options; this can be used to pass
1980
                options from the scripts to generic functions
1981
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1982

1983
  """
1984
  # save the program name and the entire command line for later logging
1985
  if sys.argv:
1986
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1987
    if len(sys.argv) >= 2:
1988
      binary += " " + sys.argv[1]
1989
      old_cmdline = " ".join(sys.argv[2:])
1990
    else:
1991
      old_cmdline = ""
1992
  else:
1993
    binary = "<unknown program>"
1994
    old_cmdline = ""
1995

    
1996
  if aliases is None:
1997
    aliases = {}
1998

    
1999
  try:
2000
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
2001
  except errors.ParameterError, err:
2002
    result, err_msg = FormatError(err)
2003
    ToStderr(err_msg)
2004
    return 1
2005

    
2006
  if func is None: # parse error
2007
    return 1
2008

    
2009
  if override is not None:
2010
    for key, val in override.iteritems():
2011
      setattr(options, key, val)
2012

    
2013
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
2014
                     stderr_logging=True)
2015

    
2016
  if old_cmdline:
2017
    logging.info("run with arguments '%s'", old_cmdline)
2018
  else:
2019
    logging.info("run with no arguments")
2020

    
2021
  try:
2022
    result = func(options, args)
2023
  except (errors.GenericError, luxi.ProtocolError,
2024
          JobSubmittedException), err:
2025
    result, err_msg = FormatError(err)
2026
    logging.exception("Error during command processing")
2027
    ToStderr(err_msg)
2028
  except KeyboardInterrupt:
2029
    result = constants.EXIT_FAILURE
2030
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2031
             " might have been submitted and"
2032
             " will continue to run in the background.")
2033
  except IOError, err:
2034
    if err.errno == errno.EPIPE:
2035
      # our terminal went away, we'll exit
2036
      sys.exit(constants.EXIT_FAILURE)
2037
    else:
2038
      raise
2039

    
2040
  return result
2041

    
2042

    
2043
def ParseNicOption(optvalue):
2044
  """Parses the value of the --net option(s).
2045

2046
  """
2047
  try:
2048
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2049
  except (TypeError, ValueError), err:
2050
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2051

    
2052
  nics = [{}] * nic_max
2053
  for nidx, ndict in optvalue:
2054
    nidx = int(nidx)
2055

    
2056
    if not isinstance(ndict, dict):
2057
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2058
                                 " got %s" % (nidx, ndict))
2059

    
2060
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2061

    
2062
    nics[nidx] = ndict
2063

    
2064
  return nics
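
# Illustrative usage (comment only): the input is the already-parsed value
# of the --net option, i.e. a list of (index, settings-dict) pairs; indices
# that are not mentioned default to an empty dict (assuming "link" and
# "mode" are valid NIC parameter names):
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#     -> [{"link": "br0"}, {}, {"mode": "routed"}]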
2065

    
2066

    
2067
def GenericInstanceCreate(mode, opts, args):
2068
  """Add an instance to the cluster via either creation or import.
2069

2070
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2071
  @param opts: the command line options selected by the user
2072
  @type args: list
2073
  @param args: should contain only one element, the new instance name
2074
  @rtype: int
2075
  @return: the desired exit code
2076

2077
  """
2078
  instance = args[0]
2079

    
2080
  (pnode, snode) = SplitNodeOption(opts.node)
2081

    
2082
  hypervisor = None
2083
  hvparams = {}
2084
  if opts.hypervisor:
2085
    hypervisor, hvparams = opts.hypervisor
2086

    
2087
  if opts.nics:
2088
    nics = ParseNicOption(opts.nics)
2089
  elif opts.no_nics:
2090
    # no nics
2091
    nics = []
2092
  elif mode == constants.INSTANCE_CREATE:
2093
    # default of one nic, all auto
2094
    nics = [{}]
2095
  else:
2096
    # mode == import
2097
    nics = []
2098

    
2099
  if opts.disk_template == constants.DT_DISKLESS:
2100
    if opts.disks or opts.sd_size is not None:
2101
      raise errors.OpPrereqError("Diskless instance but disk"
2102
                                 " information passed")
2103
    disks = []
2104
  else:
2105
    if (not opts.disks and not opts.sd_size
2106
        and mode == constants.INSTANCE_CREATE):
2107
      raise errors.OpPrereqError("No disk information specified")
2108
    if opts.disks and opts.sd_size is not None:
2109
      raise errors.OpPrereqError("Please use either the '--disk' or"
2110
                                 " '-s' option")
2111
    if opts.sd_size is not None:
2112
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2113

    
2114
    if opts.disks:
2115
      try:
2116
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2117
      except ValueError, err:
2118
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2119
      disks = [{}] * disk_max
2120
    else:
2121
      disks = []
2122
    for didx, ddict in opts.disks:
2123
      didx = int(didx)
2124
      if not isinstance(ddict, dict):
2125
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2126
        raise errors.OpPrereqError(msg)
2127
      elif constants.IDISK_SIZE in ddict:
2128
        if constants.IDISK_ADOPT in ddict:
2129
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2130
                                     " (disk %d)" % didx)
2131
        try:
2132
          ddict[constants.IDISK_SIZE] = \
2133
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2134
        except ValueError, err:
2135
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2136
                                     (didx, err))
2137
      elif constants.IDISK_ADOPT in ddict:
2138
        if mode == constants.INSTANCE_IMPORT:
2139
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2140
                                     " import")
2141
        ddict[constants.IDISK_SIZE] = 0
2142
      else:
2143
        raise errors.OpPrereqError("Missing size or adoption source for"
2144
                                   " disk %d" % didx)
2145
      disks[didx] = ddict
2146

    
2147
  if opts.tags is not None:
2148
    tags = opts.tags.split(",")
2149
  else:
2150
    tags = []
2151

    
2152
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2153
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2154

    
2155
  if mode == constants.INSTANCE_CREATE:
2156
    start = opts.start
2157
    os_type = opts.os
2158
    force_variant = opts.force_variant
2159
    src_node = None
2160
    src_path = None
2161
    no_install = opts.no_install
2162
    identify_defaults = False
2163
  elif mode == constants.INSTANCE_IMPORT:
2164
    start = False
2165
    os_type = None
2166
    force_variant = False
2167
    src_node = opts.src_node
2168
    src_path = opts.src_dir
2169
    no_install = None
2170
    identify_defaults = opts.identify_defaults
2171
  else:
2172
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2173

    
2174
  op = opcodes.OpInstanceCreate(instance_name=instance,
2175
                                disks=disks,
2176
                                disk_template=opts.disk_template,
2177
                                nics=nics,
2178
                                pnode=pnode, snode=snode,
2179
                                ip_check=opts.ip_check,
2180
                                name_check=opts.name_check,
2181
                                wait_for_sync=opts.wait_for_sync,
2182
                                file_storage_dir=opts.file_storage_dir,
2183
                                file_driver=opts.file_driver,
2184
                                iallocator=opts.iallocator,
2185
                                hypervisor=hypervisor,
2186
                                hvparams=hvparams,
2187
                                beparams=opts.beparams,
2188
                                osparams=opts.osparams,
2189
                                mode=mode,
2190
                                start=start,
2191
                                os_type=os_type,
2192
                                force_variant=force_variant,
2193
                                src_node=src_node,
2194
                                src_path=src_path,
2195
                                tags=tags,
2196
                                no_install=no_install,
2197
                                identify_defaults=identify_defaults)
2198

    
2199
  SubmitOrSend(op, opts)
2200
  return 0
2201

    
2202

    
2203
class _RunWhileClusterStoppedHelper:
2204
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2205

2206
  """
2207
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2208
    """Initializes this class.
2209

2210
    @type feedback_fn: callable
2211
    @param feedback_fn: Feedback function
2212
    @type cluster_name: string
2213
    @param cluster_name: Cluster name
2214
    @type master_node: string
2215
    @param master_node: Master node name
2216
    @type online_nodes: list
2217
    @param online_nodes: List of names of online nodes
2218

2219
    """
2220
    self.feedback_fn = feedback_fn
2221
    self.cluster_name = cluster_name
2222
    self.master_node = master_node
2223
    self.online_nodes = online_nodes
2224

    
2225
    self.ssh = ssh.SshRunner(self.cluster_name)
2226

    
2227
    self.nonmaster_nodes = [name for name in online_nodes
2228
                            if name != master_node]
2229

    
2230
    assert self.master_node not in self.nonmaster_nodes
2231

    
2232
  def _RunCmd(self, node_name, cmd):
2233
    """Runs a command on the local or a remote machine.
2234

2235
    @type node_name: string
2236
    @param node_name: Machine name
2237
    @type cmd: list
2238
    @param cmd: Command
2239

2240
    """
2241
    if node_name is None or node_name == self.master_node:
2242
      # No need to use SSH
2243
      result = utils.RunCmd(cmd)
2244
    else:
2245
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2246

    
2247
    if result.failed:
2248
      errmsg = ["Failed to run command %s" % result.cmd]
2249
      if node_name:
2250
        errmsg.append("on node %s" % node_name)
2251
      errmsg.append(": exitcode %s and error %s" %
2252
                    (result.exit_code, result.output))
2253
      raise errors.OpExecError(" ".join(errmsg))
2254

    
2255
  def Call(self, fn, *args):
2256
    """Call function while all daemons are stopped.
2257

2258
    @type fn: callable
2259
    @param fn: Function to be called
2260

2261
    """
2262
    # Pause watcher by acquiring an exclusive lock on watcher state file
2263
    self.feedback_fn("Blocking watcher")
2264
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2265
    try:
2266
      # TODO: Currently, this just blocks. There's no timeout.
2267
      # TODO: Should it be a shared lock?
2268
      watcher_block.Exclusive(blocking=True)
2269

    
2270
      # Stop master daemons, so that no new jobs can come in and all running
2271
      # ones are finished
2272
      self.feedback_fn("Stopping master daemons")
2273
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2274
      try:
2275
        # Stop daemons on all nodes
2276
        for node_name in self.online_nodes:
2277
          self.feedback_fn("Stopping daemons on %s" % node_name)
2278
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2279

    
2280
        # All daemons are shut down now
2281
        try:
2282
          return fn(self, *args)
2283
        except Exception, err:
2284
          _, errmsg = FormatError(err)
2285
          logging.exception("Caught exception")
2286
          self.feedback_fn(errmsg)
2287
          raise
2288
      finally:
2289
        # Start cluster again, master node last
2290
        for node_name in self.nonmaster_nodes + [self.master_node]:
2291
          self.feedback_fn("Starting daemons on %s" % node_name)
2292
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2293
    finally:
2294
      # Resume watcher
2295
      watcher_block.Close()
2296

    
2297

    
2298
def RunWhileClusterStopped(feedback_fn, fn, *args):
2299
  """Calls a function while all cluster daemons are stopped.
2300

2301
  @type feedback_fn: callable
2302
  @param feedback_fn: Feedback function
2303
  @type fn: callable
2304
  @param fn: Function to be called when daemons are stopped
2305

2306
  """
2307
  feedback_fn("Gathering cluster information")
2308

    
2309
  # This ensures we're running on the master daemon
2310
  cl = GetClient()
2311

    
2312
  (cluster_name, master_node) = \
2313
    cl.QueryConfigValues(["cluster_name", "master_node"])
2314

    
2315
  online_nodes = GetOnlineNodes([], cl=cl)
2316

    
2317
  # Don't keep a reference to the client. The master daemon will go away.
2318
  del cl
2319

    
2320
  assert master_node in online_nodes
2321

    
2322
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2323
                                       online_nodes).Call(fn, *args)
2324

    
2325

    
2326
def GenerateTable(headers, fields, separator, data,
2327
                  numfields=None, unitfields=None,
2328
                  units=None):
2329
  """Prints a table with headers and different fields.
2330

2331
  @type headers: dict
2332
  @param headers: dictionary mapping field names to headers for
2333
      the table
2334
  @type fields: list
2335
  @param fields: the field names corresponding to each row in
2336
      the data field
2337
  @param separator: the separator to be used; if this is None,
2338
      the default 'smart' algorithm is used which computes optimal
2339
      field width, otherwise just the separator is used between
2340
      each field
2341
  @type data: list
2342
  @param data: a list of lists, each sublist being one row to be output
2343
  @type numfields: list
2344
  @param numfields: a list with the fields that hold numeric
2345
      values and thus should be right-aligned
2346
  @type unitfields: list
2347
  @param unitfields: a list with the fields that hold numeric
2348
      values that should be formatted with the units field
2349
  @type units: string or None
2350
  @param units: the units we should use for formatting, or None for
2351
      automatic choice (human-readable for non-separator usage, otherwise
2352
      megabytes); this is a one-letter string
2353

2354
  """
2355
  if units is None:
2356
    if separator:
2357
      units = "m"
2358
    else:
2359
      units = "h"
2360

    
2361
  if numfields is None:
2362
    numfields = []
2363
  if unitfields is None:
2364
    unitfields = []
2365

    
2366
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2367
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2368

    
2369
  format_fields = []
2370
  for field in fields:
2371
    if headers and field not in headers:
2372
      # TODO: handle better unknown fields (either revert to old
2373
      # style of raising exception, or deal more intelligently with
2374
      # variable fields)
2375
      headers[field] = field
2376
    if separator is not None:
2377
      format_fields.append("%s")
2378
    elif numfields.Matches(field):
2379
      format_fields.append("%*s")
2380
    else:
2381
      format_fields.append("%-*s")
2382

    
2383
  if separator is None:
2384
    mlens = [0 for name in fields]
2385
    format_str = " ".join(format_fields)
2386
  else:
2387
    format_str = separator.replace("%", "%%").join(format_fields)
2388

    
2389
  for row in data:
2390
    if row is None:
2391
      continue
2392
    for idx, val in enumerate(row):
2393
      if unitfields.Matches(fields[idx]):
2394
        try:
2395
          val = int(val)
2396
        except (TypeError, ValueError):
2397
          pass
2398
        else:
2399
          val = row[idx] = utils.FormatUnit(val, units)
2400
      val = row[idx] = str(val)
2401
      if separator is None:
2402
        mlens[idx] = max(mlens[idx], len(val))
2403

    
2404
  result = []
2405
  if headers:
2406
    args = []
2407
    for idx, name in enumerate(fields):
2408
      hdr = headers[name]
2409
      if separator is None:
2410
        mlens[idx] = max(mlens[idx], len(hdr))
2411
        args.append(mlens[idx])
2412
      args.append(hdr)
2413
    result.append(format_str % tuple(args))
2414

    
2415
  if separator is None:
2416
    assert len(mlens) == len(fields)
2417

    
2418
    if fields and not numfields.Matches(fields[-1]):
2419
      mlens[-1] = 0
2420

    
2421
  for line in data:
2422
    args = []
2423
    if line is None:
2424
      line = ["-" for _ in fields]
2425
    for idx in range(len(fields)):
2426
      if separator is None:
2427
        args.append(mlens[idx])
2428
      args.append(line[idx])
2429
    result.append(format_str % tuple(args))
2430

    
2431
  return result
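
# Illustrative usage (comment only); the exact unit rendering depends on
# utils.FormatUnit and the hostnames are just examples:
#   headers = {"name": "Node", "mfree": "MemFree"}
#   fields = ["name", "mfree"]
#   data = [["node1.example.com", 2048], ["node2.example.com", 16384]]
#   for line in GenerateTable(headers, fields, None, data,
#                             numfields=["mfree"], unitfields=["mfree"]):
#     ToStdout(line)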
2432

    
2433

    
2434
def _FormatBool(value):
2435
  """Formats a boolean value as a string.
2436

2437
  """
2438
  if value:
2439
    return "Y"
2440
  return "N"
2441

    
2442

    
2443
#: Default formatting for query results; (callback, align right)
2444
_DEFAULT_FORMAT_QUERY = {
2445
  constants.QFT_TEXT: (str, False),
2446
  constants.QFT_BOOL: (_FormatBool, False),
2447
  constants.QFT_NUMBER: (str, True),
2448
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2449
  constants.QFT_OTHER: (str, False),
2450
  constants.QFT_UNKNOWN: (str, False),
2451
  }
2452

    
2453

    
2454
def _GetColumnFormatter(fdef, override, unit):
2455
  """Returns formatting function for a field.
2456

2457
  @type fdef: L{objects.QueryFieldDefinition}
2458
  @type override: dict
2459
  @param override: Dictionary for overriding field formatting functions,
2460
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2461
  @type unit: string
2462
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2463
  @rtype: tuple; (callable, bool)
2464
  @return: Returns the function to format a value (takes one parameter) and a
2465
    boolean for aligning the value on the right-hand side
2466

2467
  """
2468
  fmt = override.get(fdef.name, None)
2469
  if fmt is not None:
2470
    return fmt
2471

    
2472
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2473

    
2474
  if fdef.kind == constants.QFT_UNIT:
2475
    # Can't keep this information in the static dictionary
2476
    return (lambda value: utils.FormatUnit(value, unit), True)
2477

    
2478
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2479
  if fmt is not None:
2480
    return fmt
2481

    
2482
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2483

    
2484

    
2485
class _QueryColumnFormatter:
2486
  """Callable class for formatting fields of a query.
2487

2488
  """
2489
  def __init__(self, fn, status_fn, verbose):
2490
    """Initializes this class.
2491

2492
    @type fn: callable
2493
    @param fn: Formatting function
2494
    @type status_fn: callable
2495
    @param status_fn: Function to report fields' status
2496
    @type verbose: boolean
2497
    @param verbose: whether to use verbose field descriptions or not
2498

2499
    """
2500
    self._fn = fn
2501
    self._status_fn = status_fn
2502
    self._verbose = verbose
2503

    
2504
  def __call__(self, data):
2505
    """Returns a field's string representation.
2506

2507
    """
2508
    (status, value) = data
2509

    
2510
    # Report status
2511
    self._status_fn(status)
2512

    
2513
    if status == constants.RS_NORMAL:
2514
      return self._fn(value)
2515

    
2516
    assert value is None, \
2517
           "Found value %r for abnormal status %s" % (value, status)
2518

    
2519
    return FormatResultError(status, self._verbose)
2520

    
2521

    
2522
def FormatResultError(status, verbose):
2523
  """Formats result status other than L{constants.RS_NORMAL}.
2524

2525
  @param status: The result status
2526
  @type verbose: boolean
2527
  @param verbose: Whether to return the verbose text
2528
  @return: Text of result status
2529

2530
  """
2531
  assert status != constants.RS_NORMAL, \
2532
         "FormatResultError called with status equal to constants.RS_NORMAL"
2533
  try:
2534
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2535
  except KeyError:
2536
    raise NotImplementedError("Unknown status %s" % status)
2537
  else:
2538
    if verbose:
2539
      return verbose_text
2540
    return normal_text
2541

    
2542

    
2543
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2544
                      header=False, verbose=False):
2545
  """Formats data in L{objects.QueryResponse}.
2546

2547
  @type result: L{objects.QueryResponse}
2548
  @param result: result of query operation
2549
  @type unit: string
2550
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2551
    see L{utils.text.FormatUnit}
2552
  @type format_override: dict
2553
  @param format_override: Dictionary for overriding field formatting functions,
2554
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2555
  @type separator: string or None
2556
  @param separator: String used to separate fields
2557
  @type header: bool
2558
  @param header: Whether to output header row
2559
  @type verbose: boolean
2560
  @param verbose: whether to use verbose field descriptions or not
2561

2562
  """
2563
  if unit is None:
2564
    if separator:
2565
      unit = "m"
2566
    else:
2567
      unit = "h"
2568

    
2569
  if format_override is None:
2570
    format_override = {}
2571

    
2572
  stats = dict.fromkeys(constants.RS_ALL, 0)
2573

    
2574
  def _RecordStatus(status):
2575
    if status in stats:
2576
      stats[status] += 1
2577

    
2578
  columns = []
2579
  for fdef in result.fields:
2580
    assert fdef.title and fdef.name
2581
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2582
    columns.append(TableColumn(fdef.title,
2583
                               _QueryColumnFormatter(fn, _RecordStatus,
2584
                                                     verbose),
2585
                               align_right))
2586

    
2587
  table = FormatTable(result.data, columns, header, separator)
2588

    
2589
  # Collect statistics
2590
  assert len(stats) == len(constants.RS_ALL)
2591
  assert compat.all(count >= 0 for count in stats.values())
2592

    
2593
  # Determine overall status. If there was no data, unknown fields must be
2594
  # detected via the field definitions.
2595
  if (stats[constants.RS_UNKNOWN] or
2596
      (not result.data and _GetUnknownFields(result.fields))):
2597
    status = QR_UNKNOWN
2598
  elif compat.any(count > 0 for key, count in stats.items()
2599
                  if key != constants.RS_NORMAL):
2600
    status = QR_INCOMPLETE
2601
  else:
2602
    status = QR_NORMAL
2603

    
2604
  return (status, table)
2605

    
2606

    
2607
def _GetUnknownFields(fdefs):
2608
  """Returns list of unknown fields included in C{fdefs}.
2609

2610
  @type fdefs: list of L{objects.QueryFieldDefinition}
2611

2612
  """
2613
  return [fdef for fdef in fdefs
2614
          if fdef.kind == constants.QFT_UNKNOWN]
2615

    
2616

    
2617
def _WarnUnknownFields(fdefs):
2618
  """Prints a warning to stderr if a query included unknown fields.
2619

2620
  @type fdefs: list of L{objects.QueryFieldDefinition}
2621

2622
  """
2623
  unknown = _GetUnknownFields(fdefs)
2624
  if unknown:
2625
    ToStderr("Warning: Queried for unknown fields %s",
2626
             utils.CommaJoin(fdef.name for fdef in unknown))
2627
    return True
2628

    
2629
  return False
2630

    
2631

    
2632
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2633
                format_override=None, verbose=False, force_filter=False):
2634
  """Generic implementation for listing all items of a resource.
2635

2636
  @param resource: One of L{constants.QR_VIA_LUXI}
2637
  @type fields: list of strings
2638
  @param fields: List of fields to query for
2639
  @type names: list of strings
2640
  @param names: Names of items to query for
2641
  @type unit: string or None
2642
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2643
    None for automatic choice (human-readable for non-separator usage,
2644
    otherwise megabytes); this is a one-letter string
2645
  @type separator: string or None
2646
  @param separator: String used to separate fields
2647
  @type header: bool
2648
  @param header: Whether to show header row
2649
  @type force_filter: bool
2650
  @param force_filter: Whether to always treat names as filter
2651
  @type format_override: dict
2652
  @param format_override: Dictionary for overriding field formatting functions,
2653
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2654
  @type verbose: boolean
2655
  @param verbose: whether to use verbose field descriptions or not
2656

2657
  """
2658
  if cl is None:
2659
    cl = GetClient()
2660

    
2661
  if not names:
2662
    names = None
2663

    
2664
  filter_ = qlang.MakeFilter(names, force_filter)
2665

    
2666
  response = cl.Query(resource, fields, filter_)
2667

    
2668
  found_unknown = _WarnUnknownFields(response.fields)
2669

    
2670
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2671
                                     header=header,
2672
                                     format_override=format_override,
2673
                                     verbose=verbose)
2674

    
2675
  for line in data:
2676
    ToStdout(line)
2677

    
2678
  assert ((found_unknown and status == QR_UNKNOWN) or
2679
          (not found_unknown and status != QR_UNKNOWN))
2680

    
2681
  if status == QR_UNKNOWN:
2682
    return constants.EXIT_UNKNOWN_FIELD
2683

    
2684
  # TODO: Should the list command fail if not all data could be collected?
2685
  return constants.EXIT_SUCCESS
2686

    
2687

    
2688
def GenericListFields(resource, fields, separator, header, cl=None):
2689
  """Generic implementation for listing fields for a resource.
2690

2691
  @param resource: One of L{constants.QR_VIA_LUXI}
2692
  @type fields: list of strings
2693
  @param fields: List of fields to query for
2694
  @type separator: string or None
2695
  @param separator: String used to separate fields
2696
  @type header: bool
2697
  @param header: Whether to show header row
2698

2699
  """
2700
  if cl is None:
2701
    cl = GetClient()
2702

    
2703
  if not fields:
2704
    fields = None
2705

    
2706
  response = cl.QueryFields(resource, fields)
2707

    
2708
  found_unknown = _WarnUnknownFields(response.fields)
2709

    
2710
  columns = [
2711
    TableColumn("Name", str, False),
2712
    TableColumn("Title", str, False),
2713
    TableColumn("Description", str, False),
2714
    ]
2715

    
2716
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2717

    
2718
  for line in FormatTable(rows, columns, header, separator):
2719
    ToStdout(line)
2720

    
2721
  if found_unknown:
2722
    return constants.EXIT_UNKNOWN_FIELD
2723

    
2724
  return constants.EXIT_SUCCESS
2725

    
2726

    
2727
class TableColumn:
2728
  """Describes a column for L{FormatTable}.
2729

2730
  """
2731
  def __init__(self, title, fn, align_right):
2732
    """Initializes this class.
2733

2734
    @type title: string
2735
    @param title: Column title
2736
    @type fn: callable
2737
    @param fn: Formatting function
2738
    @type align_right: bool
2739
    @param align_right: Whether to align values on the right-hand side
2740

2741
    """
2742
    self.title = title
2743
    self.format = fn
2744
    self.align_right = align_right
2745

    
2746

    
2747
def _GetColFormatString(width, align_right):
2748
  """Returns the format string for a field.
2749

2750
  """
2751
  if align_right:
2752
    sign = ""
2753
  else:
2754
    sign = "-"
2755

    
2756
  return "%%%s%ss" % (sign, width)
2757

    
2758

    
2759
def FormatTable(rows, columns, header, separator):
2760
  """Formats data as a table.
2761

2762
  @type rows: list of lists
2763
  @param rows: Row data, one list per row
2764
  @type columns: list of L{TableColumn}
2765
  @param columns: Column descriptions
2766
  @type header: bool
2767
  @param header: Whether to show header row
2768
  @type separator: string or None
2769
  @param separator: String used to separate columns
2770

2771
  """
2772
  if header:
2773
    data = [[col.title for col in columns]]
2774
    colwidth = [len(col.title) for col in columns]
2775
  else:
2776
    data = []
2777
    colwidth = [0 for _ in columns]
2778

    
2779
  # Format row data
2780
  for row in rows:
2781
    assert len(row) == len(columns)
2782

    
2783
    formatted = [col.format(value) for value, col in zip(row, columns)]
2784

    
2785
    if separator is None:
2786
      # Update column widths
2787
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2788
        # Modifying a list's items while iterating is fine
2789
        colwidth[idx] = max(oldwidth, len(value))
2790

    
2791
    data.append(formatted)
2792

    
2793
  if separator is not None:
2794
    # Return early if a separator is used
2795
    return [separator.join(row) for row in data]
2796

    
2797
  if columns and not columns[-1].align_right:
2798
    # Avoid unnecessary spaces at end of line
2799
    colwidth[-1] = 0
2800

    
2801
  # Build format string
2802
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2803
                  for col, width in zip(columns, colwidth)])
2804

    
2805
  return [fmt % tuple(row) for row in data]
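
# Illustrative usage (comment only): each TableColumn carries its own
# formatting callback, so no separate numfields/unitfields lists are needed
# here:
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True),
#     ]
#   rows = [["disk0", 1024], ["disk1", 512]]
#   for line in FormatTable(rows, columns, True, None):
#     ToStdout(line)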
2806

    
2807

    
2808
def FormatTimestamp(ts):
2809
  """Formats a given timestamp.
2810

2811
  @type ts: timestamp
2812
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2813

2814
  @rtype: string
2815
  @return: a string with the formatted timestamp
2816

2817
  """
2818
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2819
    return "?"
2820
  sec, usec = ts
2821
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
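
# Illustrative usage (comment only); the date/time part depends on the
# local timezone:
#   FormatTimestamp((1234567890, 42)) -> e.g. "2009-02-13 23:31:30.000042"
#   FormatTimestamp("not a timestamp") -> "?"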
2822

    
2823

    
2824
def ParseTimespec(value):
2825
  """Parse a time specification.
2826

2827
  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks
2834

2835
  Without any suffix, the value will be taken to be in seconds.
2836

2837
  """
2838
  value = str(value)
2839
  if not value:
2840
    raise errors.OpPrereqError("Empty time specification passed")
2841
  suffix_map = {
2842
    "s": 1,
2843
    "m": 60,
2844
    "h": 3600,
2845
    "d": 86400,
2846
    "w": 604800,
2847
    }
2848
  if value[-1] not in suffix_map:
2849
    try:
2850
      value = int(value)
2851
    except (TypeError, ValueError):
2852
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2853
  else:
2854
    multiplier = suffix_map[value[-1]]
2855
    value = value[:-1]
2856
    if not value: # no data left after stripping the suffix
2857
      raise errors.OpPrereqError("Invalid time specification (only"
2858
                                 " suffix passed)")
2859
    try:
2860
      value = int(value) * multiplier
2861
    except (TypeError, ValueError):
2862
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2863
  return value
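
# Illustrative usage (comment only):
#   ParseTimespec("30") -> 30
#   ParseTimespec("2h") -> 7200
#   ParseTimespec("1w") -> 604800
#   ParseTimespec("")   -> raises errors.OpPrereqError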
2864

    
2865

    
2866
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2867
                   filter_master=False, nodegroup=None):
2868
  """Returns the names of online nodes.
2869

2870
  This function will also print a note on stderr with the names of
  any offline nodes that were skipped.
2872

2873
  @param nodes: if not empty, use only this subset of nodes (minus the
2874
      offline ones)
2875
  @param cl: if not None, luxi client to use
2876
  @type nowarn: boolean
2877
  @param nowarn: by default, this function will output a note with the
2878
      offline nodes that are skipped; if this parameter is True the
2879
      note is not displayed
2880
  @type secondary_ips: boolean
2881
  @param secondary_ips: if True, return the secondary IPs instead of the
2882
      names, useful for doing network traffic over the replication interface
2883
      (if any)
2884
  @type filter_master: boolean
2885
  @param filter_master: if True, do not return the master node in the list
2886
      (useful in coordination with secondary_ips where we cannot check our
2887
      node name against the list)
2888
  @type nodegroup: string
2889
  @param nodegroup: If set, only return nodes in this node group
2890

2891
  """
2892
  if cl is None:
2893
    cl = GetClient()
2894

    
2895
  filter_ = []
2896

    
2897
  if nodes:
2898
    filter_.append(qlang.MakeSimpleFilter("name", nodes))
2899

    
2900
  if nodegroup is not None:
2901
    filter_.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
2902
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
2903

    
2904
  if filter_master:
2905
    filter_.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
2906

    
2907
  if filter_:
2908
    if len(filter_) > 1:
2909
      final_filter = [qlang.OP_AND] + filter_
2910
    else:
2911
      assert len(filter_) == 1
2912
      final_filter = filter_[0]
2913
  else:
2914
    final_filter = None
2915

    
2916
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
2917

    
2918
  def _IsOffline(row):
2919
    (_, (_, offline), _) = row
2920
    return offline
2921

    
2922
  def _GetName(row):
2923
    ((_, name), _, _) = row
2924
    return name
2925

    
2926
  def _GetSip(row):
2927
    (_, _, (_, sip)) = row
2928
    return sip
2929

    
2930
  (offline, online) = compat.partition(result.data, _IsOffline)
2931

    
2932
  if offline and not nowarn:
2933
    ToStderr("Note: skipping offline node(s): %s" %
2934
             utils.CommaJoin(map(_GetName, offline)))
2935

    
2936
  if secondary_ips:
2937
    fn = _GetSip
2938
  else:
2939
    fn = _GetName
2940

    
2941
  return map(fn, online)
2942

    
2943

    
2944
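# Illustrative sketch (not part of the original module): how a command
# implementation might restrict itself to reachable nodes.  The node group
# name "rack1" and the helper name are assumptions for the example only.
def _ExampleOnlineRackNodes():
  """Return the online node names of group "rack1", excluding the master."""
  cl = GetClient()
  return GetOnlineNodes([], cl=cl, filter_master=True, nodegroup="rack1")

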
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


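# Illustrative sketch (not part of the original module): _ToStream applies
# "%"-formatting only when extra arguments are passed, so a literal "%" is
# safe in the no-argument form.  The helper name is hypothetical.
def _ExamplePrintProgress(done, total):
  """Print a progress line to stdout and a note to stderr."""
  ToStdout("Processed %d of %d items", done, total)   # formatted
  ToStderr("Note: not at 100% yet, still working")    # no args, "%" is literal

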
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, used when reporting job
        submission and results

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob returns only the job ID (no success flag), but raises an
        # exception if the submission fails, so we'd notice a failure anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


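# Illustrative sketch (not part of the original module): typical use of
# JobExecutor by a command implementation.  The helper name is hypothetical
# and building the opcodes is left to the caller, so no specific opcode
# class is assumed here.
def _ExampleRunNamedJobs(opts, named_ops):
  """Run one job per (name, [opcode, ...]) pair and report overall success."""
  jex = JobExecutor(opts=opts, verbose=True)
  for (name, ops) in named_ops:
    jex.QueueJob(name, *ops)
  # GetResults() submits any still-pending jobs and then polls them
  results = jex.GetResults()
  return compat.all(success for (success, _) in results)

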
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the parameters explicitly set on the object
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))


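# Illustrative sketch (not part of the original module): rendering a small
# parameter set the way the "gnt-* info" style output does.  The parameter
# names and the helper name are examples only.
def _ExampleFormatParams():
  """Return formatted text, marking defaulted values as 'default (...)'."""
  buf = StringIO()
  own = {"kernel_path": "/boot/vmlinuz-custom"}
  actual = {"kernel_path": "/boot/vmlinuz-custom", "root_path": "/dev/xvda1"}
  FormatParameterDict(buf, own, actual, level=2)
  # kernel_path is printed verbatim, root_path as "default (/dev/xvda1)"
  return buf.getvalue()

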
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
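

# Illustrative sketch (not part of the original module): guarding a
# destructive command behind ConfirmOperation unless a force flag was given.
# The "opts.force" attribute, the node list and the helper name are
# assumptions for the example only.
def _ExampleConfirmRemoval(opts, node_names):
  """Ask for confirmation before removing the given nodes."""
  if opts.force:
    return True
  return ConfirmOperation(node_names, "nodes", "node removal")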