#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
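
# Editorial note (not part of the original module): the tags-source file read
# above is consumed line by line; each line is stripped of surrounding
# whitespace and appended as a tag name, and passing "-" reads the list from
# standard input instead of a file.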


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
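
# Illustrative example (editorial, not part of the original module): with the
# prefix handling above, _SplitKeyVal("--backend", "mem=512,no_auto_balance,-kernel_path")
# would return {"mem": "512", "auto_balance": False, "kernel_path": None},
# while a repeated key such as "mem=512,mem=1024" raises errors.ParameterError.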


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
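
# Illustrative example (editorial, not part of the original module): a value of
# "0:size=10G,mode=rw" would be parsed into ("0", {"size": "10G", "mode": "rw"}),
# while a bare "no_link" (no colon) yields ("link", False), matching the
# parameter-group removal syntax handled above.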


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify it it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
836
                         " disrupt briefly the replication (like during the"
837
                         " migration")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                           default=None,
                           help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                           default=None,
                           help="File containing the certificate of the CA"
                                " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                               dest="new_spice_cert", default=None,
                               action="store_true",
                               help=("Generate a new self-signed SPICE"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volumes names, that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1362

    
1363

    
1364
def _CheckArguments(cmd, args_def, args):
1365
  """Verifies the arguments using the argument definition.
1366

1367
  Algorithm:
1368

1369
    1. Abort with error if values specified by user but none expected.
1370

1371
    1. For each argument in definition
1372

1373
      1. Keep running count of minimum number of values (min_count)
1374
      1. Keep running count of maximum number of values (max_count)
1375
      1. If it has an unlimited number of values
1376

1377
        1. Abort with error if it's not the last argument in the definition
1378

1379
    1. If last argument has limited number of values
1380

1381
      1. Abort with error if number of values doesn't match or is too large
1382

1383
    1. Abort with error if user didn't pass enough values (min_count)
1384

1385
  """
1386
  if args and not args_def:
1387
    ToStderr("Error: Command %s expects no arguments", cmd)
1388
    return False
1389

    
1390
  min_count = None
1391
  max_count = None
1392
  check_max = None
1393

    
1394
  last_idx = len(args_def) - 1
1395

    
1396
  for idx, arg in enumerate(args_def):
1397
    if min_count is None:
1398
      min_count = arg.min
1399
    elif arg.min is not None:
1400
      min_count += arg.min
1401

    
1402
    if max_count is None:
1403
      max_count = arg.max
1404
    elif arg.max is not None:
1405
      max_count += arg.max
1406

    
1407
    if idx == last_idx:
1408
      check_max = (arg.max is not None)
1409

    
1410
    elif arg.max is None:
1411
      raise errors.ProgrammerError("Only the last argument can have max=None")
1412

    
1413
  if check_max:
1414
    # Command with exact number of arguments
1415
    if (min_count is not None and max_count is not None and
1416
        min_count == max_count and len(args) != min_count):
1417
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1418
      return False
1419

    
1420
    # Command with limited number of arguments
1421
    if max_count is not None and len(args) > max_count:
1422
      ToStderr("Error: Command %s expects only %d argument(s)",
1423
               cmd, max_count)
1424
      return False
1425

    
1426
  # Command with some required arguments
1427
  if min_count is not None and len(args) < min_count:
1428
    ToStderr("Error: Command %s expects at least %d argument(s)",
1429
             cmd, min_count)
1430
    return False
1431

    
1432
  return True
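# Illustrative sketch (added here for clarity; not part of the original code):
# with an argument definition whose single entry has min=1 and max=1, calling
# _CheckArguments("info", args_def, []) prints
# "Error: Command info expects 1 argument(s)" and returns False, while passing
# exactly one argument makes it return True.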
1433

    
1434

    
1435
def SplitNodeOption(value):
1436
  """Splits the value of a --node option.
1437

1438
  """
1439
  if value and ":" in value:
1440
    return value.split(":", 1)
1441
  else:
1442
    return (value, None)
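# Usage sketch (added for illustration; mirrors the behaviour above):
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)
#   SplitNodeOption(None) -> (None, None)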
1443

    
1444

    
1445
def CalculateOSNames(os_name, os_variants):
1446
  """Calculates all the names an OS can be called, according to its variants.
1447

1448
  @type os_name: string
1449
  @param os_name: base name of the os
1450
  @type os_variants: list or None
1451
  @param os_variants: list of supported variants
1452
  @rtype: list
1453
  @return: list of valid names
1454

1455
  """
1456
  if os_variants:
1457
    return ["%s+%s" % (os_name, v) for v in os_variants]
1458
  else:
1459
    return [os_name]
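# Usage sketch (added for illustration; OS and variant names are made up):
#   CalculateOSNames("debian-image", ["squeeze", "wheezy"])
#     -> ["debian-image+squeeze", "debian-image+wheezy"]
#   CalculateOSNames("debian-image", None) -> ["debian-image"]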
1460

    
1461

    
1462
def ParseFields(selected, default):
1463
  """Parses the values of "--field"-like options.
1464

1465
  @type selected: string or None
1466
  @param selected: User-selected options
1467
  @type default: list
1468
  @param default: Default fields
1469

1470
  """
1471
  if selected is None:
1472
    return default
1473

    
1474
  if selected.startswith("+"):
1475
    return default + selected[1:].split(",")
1476

    
1477
  return selected.split(",")
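# Usage sketch (added for illustration):
#   ParseFields(None, ["name", "os"])      -> ["name", "os"]
#   ParseFields("+ctime", ["name", "os"])  -> ["name", "os", "ctime"]
#   ParseFields("name,status", ["name"])   -> ["name", "status"]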
1478

    
1479

    
1480
UsesRPC = rpc.RunWithRPC
1481

    
1482

    
1483
def AskUser(text, choices=None):
1484
  """Ask the user a question.
1485

1486
  @param text: the question to ask
1487

1488
  @param choices: list whose elements are tuples (input_char, return_value,
1489
      description); if not given, it will default to: [('y', True,
1490
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
1491
      note that the '?' char is reserved for help
1492

1493
  @return: one of the return values from the choices list; if input is
1494
      not possible (i.e. not running with a tty), we return the last
1495
      entry from the list
1496

1497
  """
1498
  if choices is None:
1499
    choices = [("y", True, "Perform the operation"),
1500
               ("n", False, "Do not perform the operation")]
1501
  if not choices or not isinstance(choices, list):
1502
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1503
  for entry in choices:
1504
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1505
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1506

    
1507
  answer = choices[-1][1]
1508
  new_text = []
1509
  for line in text.splitlines():
1510
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1511
  text = "\n".join(new_text)
1512
  try:
1513
    f = file("/dev/tty", "a+")
1514
  except IOError:
1515
    return answer
1516
  try:
1517
    chars = [entry[0] for entry in choices]
1518
    chars[-1] = "[%s]" % chars[-1]
1519
    chars.append("?")
1520
    maps = dict([(entry[0], entry[1]) for entry in choices])
1521
    while True:
1522
      f.write(text)
1523
      f.write("\n")
1524
      f.write("/".join(chars))
1525
      f.write(": ")
1526
      line = f.readline(2).strip().lower()
1527
      if line in maps:
1528
        answer = maps[line]
1529
        break
1530
      elif line == "?":
1531
        for entry in choices:
1532
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1533
        f.write("\n")
1534
        continue
1535
  finally:
1536
    f.close()
1537
  return answer
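# Usage sketch (added for illustration; the prompt text and choice letters are
# arbitrary examples):
#   usertext = "Destroy cluster and all its instances?"
#   choices = [("y", True, "Yes, destroy it"),
#              ("n", False, "No, keep it"),
#              ("v", "verify", "Verify first")]
#   if AskUser(usertext, choices) is True:
#     ...
# Without a tty, the last entry's return value ("verify" here) is returned.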
1538

    
1539

    
1540
class JobSubmittedException(Exception):
1541
  """Job was submitted, client should exit.
1542

1543
  This exception has one argument, the ID of the job that was
1544
  submitted. The handler should print this ID.
1545

1546
  This is not an error, just a structured way to exit from clients.
1547

1548
  """
1549

    
1550

    
1551
def SendJob(ops, cl=None):
1552
  """Function to submit an opcode without waiting for the results.
1553

1554
  @type ops: list
1555
  @param ops: list of opcodes
1556
  @type cl: luxi.Client
1557
  @param cl: the luxi client to use for communicating with the master;
1558
             if None, a new client will be created
1559

1560
  """
1561
  if cl is None:
1562
    cl = GetClient()
1563

    
1564
  job_id = cl.SubmitJob(ops)
1565

    
1566
  return job_id
1567

    
1568

    
1569
def GenericPollJob(job_id, cbs, report_cbs):
1570
  """Generic job-polling function.
1571

1572
  @type job_id: number
1573
  @param job_id: Job ID
1574
  @type cbs: Instance of L{JobPollCbBase}
1575
  @param cbs: Data callbacks
1576
  @type report_cbs: Instance of L{JobPollReportCbBase}
1577
  @param report_cbs: Reporting callbacks
1578

1579
  """
1580
  prev_job_info = None
1581
  prev_logmsg_serial = None
1582

    
1583
  status = None
1584

    
1585
  while True:
1586
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1587
                                      prev_logmsg_serial)
1588
    if not result:
1589
      # job not found, go away!
1590
      raise errors.JobLost("Job with id %s lost" % job_id)
1591

    
1592
    if result == constants.JOB_NOTCHANGED:
1593
      report_cbs.ReportNotChanged(job_id, status)
1594

    
1595
      # Wait again
1596
      continue
1597

    
1598
    # Split result, a tuple of (field values, log entries)
1599
    (job_info, log_entries) = result
1600
    (status, ) = job_info
1601

    
1602
    if log_entries:
1603
      for log_entry in log_entries:
1604
        (serial, timestamp, log_type, message) = log_entry
1605
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1606
                                    log_type, message)
1607
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1608

    
1609
    # TODO: Handle canceled and archived jobs
1610
    elif status in (constants.JOB_STATUS_SUCCESS,
1611
                    constants.JOB_STATUS_ERROR,
1612
                    constants.JOB_STATUS_CANCELING,
1613
                    constants.JOB_STATUS_CANCELED):
1614
      break
1615

    
1616
    prev_job_info = job_info
1617

    
1618
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1619
  if not jobs:
1620
    raise errors.JobLost("Job with id %s lost" % job_id)
1621

    
1622
  status, opstatus, result = jobs[0]
1623

    
1624
  if status == constants.JOB_STATUS_SUCCESS:
1625
    return result
1626

    
1627
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1628
    raise errors.OpExecError("Job was canceled")
1629

    
1630
  has_ok = False
1631
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1632
    if status == constants.OP_STATUS_SUCCESS:
1633
      has_ok = True
1634
    elif status == constants.OP_STATUS_ERROR:
1635
      errors.MaybeRaise(msg)
1636

    
1637
      if has_ok:
1638
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1639
                                 (idx, msg))
1640

    
1641
      raise errors.OpExecError(str(msg))
1642

    
1643
  # default failure mode
1644
  raise errors.OpExecError(result)
1645

    
1646

    
1647
class JobPollCbBase:
1648
  """Base class for L{GenericPollJob} callbacks.
1649

1650
  """
1651
  def __init__(self):
1652
    """Initializes this class.
1653

1654
    """
1655

    
1656
  def WaitForJobChangeOnce(self, job_id, fields,
1657
                           prev_job_info, prev_log_serial):
1658
    """Waits for changes on a job.
1659

1660
    """
1661
    raise NotImplementedError()
1662

    
1663
  def QueryJobs(self, job_ids, fields):
1664
    """Returns the selected fields for the selected job IDs.
1665

1666
    @type job_ids: list of numbers
1667
    @param job_ids: Job IDs
1668
    @type fields: list of strings
1669
    @param fields: Fields
1670

1671
    """
1672
    raise NotImplementedError()
1673

    
1674

    
1675
class JobPollReportCbBase:
1676
  """Base class for L{GenericPollJob} reporting callbacks.
1677

1678
  """
1679
  def __init__(self):
1680
    """Initializes this class.
1681

1682
    """
1683

    
1684
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1685
    """Handles a log message.
1686

1687
    """
1688
    raise NotImplementedError()
1689

    
1690
  def ReportNotChanged(self, job_id, status):
1691
    """Called for if a job hasn't changed in a while.
1692

1693
    @type job_id: number
1694
    @param job_id: Job ID
1695
    @type status: string or None
1696
    @param status: Job status if available
1697

1698
    """
1699
    raise NotImplementedError()
1700

    
1701

    
1702
class _LuxiJobPollCb(JobPollCbBase):
1703
  def __init__(self, cl):
1704
    """Initializes this class.
1705

1706
    """
1707
    JobPollCbBase.__init__(self)
1708
    self.cl = cl
1709

    
1710
  def WaitForJobChangeOnce(self, job_id, fields,
1711
                           prev_job_info, prev_log_serial):
1712
    """Waits for changes on a job.
1713

1714
    """
1715
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1716
                                        prev_job_info, prev_log_serial)
1717

    
1718
  def QueryJobs(self, job_ids, fields):
1719
    """Returns the selected fields for the selected job IDs.
1720

1721
    """
1722
    return self.cl.QueryJobs(job_ids, fields)
1723

    
1724

    
1725
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1726
  def __init__(self, feedback_fn):
1727
    """Initializes this class.
1728

1729
    """
1730
    JobPollReportCbBase.__init__(self)
1731

    
1732
    self.feedback_fn = feedback_fn
1733

    
1734
    assert callable(feedback_fn)
1735

    
1736
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1737
    """Handles a log message.
1738

1739
    """
1740
    self.feedback_fn((timestamp, log_type, log_msg))
1741

    
1742
  def ReportNotChanged(self, job_id, status):
1743
    """Called if a job hasn't changed in a while.
1744

1745
    """
1746
    # Ignore
1747

    
1748

    
1749
class StdioJobPollReportCb(JobPollReportCbBase):
1750
  def __init__(self):
1751
    """Initializes this class.
1752

1753
    """
1754
    JobPollReportCbBase.__init__(self)
1755

    
1756
    self.notified_queued = False
1757
    self.notified_waitlock = False
1758

    
1759
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1760
    """Handles a log message.
1761

1762
    """
1763
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1764
             FormatLogMessage(log_type, log_msg))
1765

    
1766
  def ReportNotChanged(self, job_id, status):
1767
    """Called if a job hasn't changed in a while.
1768

1769
    """
1770
    if status is None:
1771
      return
1772

    
1773
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1774
      ToStderr("Job %s is waiting in queue", job_id)
1775
      self.notified_queued = True
1776

    
1777
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1778
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1779
      self.notified_waitlock = True
1780

    
1781

    
1782
def FormatLogMessage(log_type, log_msg):
1783
  """Formats a job message according to its type.
1784

1785
  """
1786
  if log_type != constants.ELOG_MESSAGE:
1787
    log_msg = str(log_msg)
1788

    
1789
  return utils.SafeEncode(log_msg)
1790

    
1791

    
1792
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1793
  """Function to poll for the result of a job.
1794

1795
  @type job_id: job identifier
1796
  @param job_id: the job to poll for results
1797
  @type cl: luxi.Client
1798
  @param cl: the luxi client to use for communicating with the master;
1799
             if None, a new client will be created
1800

1801
  """
1802
  if cl is None:
1803
    cl = GetClient()
1804

    
1805
  if reporter is None:
1806
    if feedback_fn:
1807
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1808
    else:
1809
      reporter = StdioJobPollReportCb()
1810
  elif feedback_fn:
1811
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1812

    
1813
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
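# Hedged usage sketch (added for illustration; _ExampleSubmitAndWait is a
# hypothetical helper, not part of the original module):
def _ExampleSubmitAndWait(ops):
  """Submits a list of opcodes as one job and waits for its result.

  Log messages are printed to stdout because no feedback function is
  given, so PollJob falls back to StdioJobPollReportCb.

  """
  cl = GetClient()
  job_id = SendJob(ops, cl=cl)
  return PollJob(job_id, cl=cl)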
1814

    
1815

    
1816
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1817
  """Legacy function to submit an opcode.
1818

1819
  This is just a simple wrapper over the construction of the processor
1820
  instance. It should be extended to better handle feedback and
1821
  interaction functions.
1822

1823
  """
1824
  if cl is None:
1825
    cl = GetClient()
1826

    
1827
  SetGenericOpcodeOpts([op], opts)
1828

    
1829
  job_id = SendJob([op], cl=cl)
1830

    
1831
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1832
                       reporter=reporter)
1833

    
1834
  return op_results[0]
1835

    
1836

    
1837
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1838
  """Wrapper around SubmitOpCode or SendJob.
1839

1840
  This function will decide, based on the 'opts' parameter, whether to
1841
  submit and wait for the result of the opcode (and return it), or
1842
  whether to just send the job and print its identifier. It is used in
1843
  order to simplify the implementation of the '--submit' option.
1844

1845
  It will also process the opcodes if we're sending them via SendJob
1846
  (otherwise SubmitOpCode does it).
1847

1848
  """
1849
  if opts and opts.submit_only:
1850
    job = [op]
1851
    SetGenericOpcodeOpts(job, opts)
1852
    job_id = SendJob(job, cl=cl)
1853
    raise JobSubmittedException(job_id)
1854
  else:
1855
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1856

    
1857

    
1858
def SetGenericOpcodeOpts(opcode_list, options):
1859
  """Processor for generic options.
1860

1861
  This function updates the given opcodes based on generic command
1862
  line options (like debug, dry-run, etc.).
1863

1864
  @param opcode_list: list of opcodes
1865
  @param options: command line options or None
1866
  @return: None (in-place modification)
1867

1868
  """
1869
  if not options:
1870
    return
1871
  for op in opcode_list:
1872
    op.debug_level = options.debug
1873
    if hasattr(options, "dry_run"):
1874
      op.dry_run = options.dry_run
1875
    if getattr(options, "priority", None) is not None:
1876
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1877

    
1878

    
1879
def GetClient():
1880
  # TODO: Cache object?
1881
  try:
1882
    client = luxi.Client()
1883
  except luxi.NoMasterError:
1884
    ss = ssconf.SimpleStore()
1885

    
1886
    # Try to read ssconf file
1887
    try:
1888
      ss.GetMasterNode()
1889
    except errors.ConfigurationError:
1890
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1891
                                 " not part of a cluster")
1892

    
1893
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1894
    if master != myself:
1895
      raise errors.OpPrereqError("This is not the master node, please connect"
1896
                                 " to node '%s' and rerun the command" %
1897
                                 master)
1898
    raise
1899
  return client
1900

    
1901

    
1902
def FormatError(err):
1903
  """Return a formatted error message for a given error.
1904

1905
  This function takes an exception instance and returns a tuple
1906
  consisting of two values: first, the recommended exit code, and
1907
  second, a string describing the error message (not
1908
  newline-terminated).
1909

1910
  """
1911
  retcode = 1
1912
  obuf = StringIO()
1913
  msg = str(err)
1914
  if isinstance(err, errors.ConfigurationError):
1915
    txt = "Corrupt configuration file: %s" % msg
1916
    logging.error(txt)
1917
    obuf.write(txt + "\n")
1918
    obuf.write("Aborting.")
1919
    retcode = 2
1920
  elif isinstance(err, errors.HooksAbort):
1921
    obuf.write("Failure: hooks execution failed:\n")
1922
    for node, script, out in err.args[0]:
1923
      if out:
1924
        obuf.write("  node: %s, script: %s, output: %s\n" %
1925
                   (node, script, out))
1926
      else:
1927
        obuf.write("  node: %s, script: %s (no output)\n" %
1928
                   (node, script))
1929
  elif isinstance(err, errors.HooksFailure):
1930
    obuf.write("Failure: hooks general failure: %s" % msg)
1931
  elif isinstance(err, errors.ResolverError):
1932
    this_host = netutils.Hostname.GetSysName()
1933
    if err.args[0] == this_host:
1934
      msg = "Failure: can't resolve my own hostname ('%s')"
1935
    else:
1936
      msg = "Failure: can't resolve hostname '%s'"
1937
    obuf.write(msg % err.args[0])
1938
  elif isinstance(err, errors.OpPrereqError):
1939
    if len(err.args) == 2:
1940
      obuf.write("Failure: prerequisites not met for this"
1941
               " operation:\nerror type: %s, error details:\n%s" %
1942
                 (err.args[1], err.args[0]))
1943
    else:
1944
      obuf.write("Failure: prerequisites not met for this"
1945
                 " operation:\n%s" % msg)
1946
  elif isinstance(err, errors.OpExecError):
1947
    obuf.write("Failure: command execution error:\n%s" % msg)
1948
  elif isinstance(err, errors.TagError):
1949
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1950
  elif isinstance(err, errors.JobQueueDrainError):
1951
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1952
               " accept new requests\n")
1953
  elif isinstance(err, errors.JobQueueFull):
1954
    obuf.write("Failure: the job queue is full and doesn't accept new"
1955
               " job submissions until old jobs are archived\n")
1956
  elif isinstance(err, errors.TypeEnforcementError):
1957
    obuf.write("Parameter Error: %s" % msg)
1958
  elif isinstance(err, errors.ParameterError):
1959
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1960
  elif isinstance(err, luxi.NoMasterError):
1961
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1962
               " and listening for connections?")
1963
  elif isinstance(err, luxi.TimeoutError):
1964
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1965
               " been submitted and will continue to run even if the call"
1966
               " timed out. Useful commands in this situation are \"gnt-job"
1967
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1968
    obuf.write(msg)
1969
  elif isinstance(err, luxi.PermissionError):
1970
    obuf.write("It seems you don't have permissions to connect to the"
1971
               " master daemon.\nPlease retry as a different user.")
1972
  elif isinstance(err, luxi.ProtocolError):
1973
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1974
               "%s" % msg)
1975
  elif isinstance(err, errors.JobLost):
1976
    obuf.write("Error checking job status: %s" % msg)
1977
  elif isinstance(err, errors.QueryFilterParseError):
1978
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
1979
    obuf.write("\n".join(err.GetDetails()))
1980
  elif isinstance(err, errors.GenericError):
1981
    obuf.write("Unhandled Ganeti error: %s" % msg)
1982
  elif isinstance(err, JobSubmittedException):
1983
    obuf.write("JobID: %s\n" % err.args[0])
1984
    retcode = 0
1985
  else:
1986
    obuf.write("Unhandled exception: %s" % msg)
1987
  return retcode, obuf.getvalue().rstrip("\n")
1988

    
1989

    
1990
def GenericMain(commands, override=None, aliases=None):
1991
  """Generic main function for all the gnt-* commands.
1992

1993
  Arguments:
1994
    - commands: a dictionary with a special structure, see the design doc
1995
                for command line handling.
1996
    - override: if not None, we expect a dictionary with keys that will
1997
                override command line options; this can be used to pass
1998
                options from the scripts to generic functions
1999
    - aliases: dictionary with command aliases {'alias': 'target', ...}
2000

2001
  """
2002
  # save the program name and the entire command line for later logging
2003
  if sys.argv:
2004
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
2005
    if len(sys.argv) >= 2:
2006
      binary += " " + sys.argv[1]
2007
      old_cmdline = " ".join(sys.argv[2:])
2008
    else:
2009
      old_cmdline = ""
2010
  else:
2011
    binary = "<unknown program>"
2012
    old_cmdline = ""
2013

    
2014
  if aliases is None:
2015
    aliases = {}
2016

    
2017
  try:
2018
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
2019
  except errors.ParameterError, err:
2020
    result, err_msg = FormatError(err)
2021
    ToStderr(err_msg)
2022
    return 1
2023

    
2024
  if func is None: # parse error
2025
    return 1
2026

    
2027
  if override is not None:
2028
    for key, val in override.iteritems():
2029
      setattr(options, key, val)
2030

    
2031
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
2032
                     stderr_logging=True)
2033

    
2034
  if old_cmdline:
2035
    logging.info("run with arguments '%s'", old_cmdline)
2036
  else:
2037
    logging.info("run with no arguments")
2038

    
2039
  try:
2040
    result = func(options, args)
2041
  except (errors.GenericError, luxi.ProtocolError,
2042
          JobSubmittedException), err:
2043
    result, err_msg = FormatError(err)
2044
    logging.exception("Error during command processing")
2045
    ToStderr(err_msg)
2046
  except KeyboardInterrupt:
2047
    result = constants.EXIT_FAILURE
2048
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2049
             " might have been submitted and"
2050
             " will continue to run in the background.")
2051
  except IOError, err:
2052
    if err.errno == errno.EPIPE:
2053
      # our terminal went away, we'll exit
2054
      sys.exit(constants.EXIT_FAILURE)
2055
    else:
2056
      raise
2057

    
2058
  return result
2059

    
2060

    
2061
def ParseNicOption(optvalue):
2062
  """Parses the value of the --net option(s).
2063

2064
  """
2065
  try:
2066
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2067
  except (TypeError, ValueError), err:
2068
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2069

    
2070
  nics = [{}] * nic_max
2071
  for nidx, ndict in optvalue:
2072
    nidx = int(nidx)
2073

    
2074
    if not isinstance(ndict, dict):
2075
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2076
                                 " got %s" % (nidx, ndict))
2077

    
2078
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2079

    
2080
    nics[nidx] = ndict
2081

    
2082
  return nics
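# Usage sketch (added for illustration): the option value is a list of
# (index, settings-dict) pairs as produced by the --net option parser, e.g.
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#     -> [{"link": "br0"}, {}, {"mode": "routed"}]
# (each settings dict is additionally type-checked against
# constants.INIC_PARAMS_TYPES).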
2083

    
2084

    
2085
def GenericInstanceCreate(mode, opts, args):
2086
  """Add an instance to the cluster via either creation or import.
2087

2088
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2089
  @param opts: the command line options selected by the user
2090
  @type args: list
2091
  @param args: should contain only one element, the new instance name
2092
  @rtype: int
2093
  @return: the desired exit code
2094

2095
  """
2096
  instance = args[0]
2097

    
2098
  (pnode, snode) = SplitNodeOption(opts.node)
2099

    
2100
  hypervisor = None
2101
  hvparams = {}
2102
  if opts.hypervisor:
2103
    hypervisor, hvparams = opts.hypervisor
2104

    
2105
  if opts.nics:
2106
    nics = ParseNicOption(opts.nics)
2107
  elif opts.no_nics:
2108
    # no nics
2109
    nics = []
2110
  elif mode == constants.INSTANCE_CREATE:
2111
    # default of one nic, all auto
2112
    nics = [{}]
2113
  else:
2114
    # mode == import
2115
    nics = []
2116

    
2117
  if opts.disk_template == constants.DT_DISKLESS:
2118
    if opts.disks or opts.sd_size is not None:
2119
      raise errors.OpPrereqError("Diskless instance but disk"
2120
                                 " information passed")
2121
    disks = []
2122
  else:
2123
    if (not opts.disks and not opts.sd_size
2124
        and mode == constants.INSTANCE_CREATE):
2125
      raise errors.OpPrereqError("No disk information specified")
2126
    if opts.disks and opts.sd_size is not None:
2127
      raise errors.OpPrereqError("Please use either the '--disk' or"
2128
                                 " '-s' option")
2129
    if opts.sd_size is not None:
2130
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2131

    
2132
    if opts.disks:
2133
      try:
2134
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2135
      except ValueError, err:
2136
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2137
      disks = [{}] * disk_max
2138
    else:
2139
      disks = []
2140
    for didx, ddict in opts.disks:
2141
      didx = int(didx)
2142
      if not isinstance(ddict, dict):
2143
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2144
        raise errors.OpPrereqError(msg)
2145
      elif constants.IDISK_SIZE in ddict:
2146
        if constants.IDISK_ADOPT in ddict:
2147
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2148
                                     " (disk %d)" % didx)
2149
        try:
2150
          ddict[constants.IDISK_SIZE] = \
2151
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2152
        except ValueError, err:
2153
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2154
                                     (didx, err))
2155
      elif constants.IDISK_ADOPT in ddict:
2156
        if mode == constants.INSTANCE_IMPORT:
2157
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2158
                                     " import")
2159
        ddict[constants.IDISK_SIZE] = 0
2160
      else:
2161
        raise errors.OpPrereqError("Missing size or adoption source for"
2162
                                   " disk %d" % didx)
2163
      disks[didx] = ddict
2164

    
2165
  if opts.tags is not None:
2166
    tags = opts.tags.split(",")
2167
  else:
2168
    tags = []
2169

    
2170
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2171
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2172

    
2173
  if mode == constants.INSTANCE_CREATE:
2174
    start = opts.start
2175
    os_type = opts.os
2176
    force_variant = opts.force_variant
2177
    src_node = None
2178
    src_path = None
2179
    no_install = opts.no_install
2180
    identify_defaults = False
2181
  elif mode == constants.INSTANCE_IMPORT:
2182
    start = False
2183
    os_type = None
2184
    force_variant = False
2185
    src_node = opts.src_node
2186
    src_path = opts.src_dir
2187
    no_install = None
2188
    identify_defaults = opts.identify_defaults
2189
  else:
2190
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2191

    
2192
  op = opcodes.OpInstanceCreate(instance_name=instance,
2193
                                disks=disks,
2194
                                disk_template=opts.disk_template,
2195
                                nics=nics,
2196
                                pnode=pnode, snode=snode,
2197
                                ip_check=opts.ip_check,
2198
                                name_check=opts.name_check,
2199
                                wait_for_sync=opts.wait_for_sync,
2200
                                file_storage_dir=opts.file_storage_dir,
2201
                                file_driver=opts.file_driver,
2202
                                iallocator=opts.iallocator,
2203
                                hypervisor=hypervisor,
2204
                                hvparams=hvparams,
2205
                                beparams=opts.beparams,
2206
                                osparams=opts.osparams,
2207
                                mode=mode,
2208
                                start=start,
2209
                                os_type=os_type,
2210
                                force_variant=force_variant,
2211
                                src_node=src_node,
2212
                                src_path=src_path,
2213
                                tags=tags,
2214
                                no_install=no_install,
2215
                                identify_defaults=identify_defaults)
2216

    
2217
  SubmitOrSend(op, opts)
2218
  return 0
2219

    
2220

    
2221
class _RunWhileClusterStoppedHelper:
2222
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2223

2224
  """
2225
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2226
    """Initializes this class.
2227

2228
    @type feedback_fn: callable
2229
    @param feedback_fn: Feedback function
2230
    @type cluster_name: string
2231
    @param cluster_name: Cluster name
2232
    @type master_node: string
2233
    @param master_node: Master node name
2234
    @type online_nodes: list
2235
    @param online_nodes: List of names of online nodes
2236

2237
    """
2238
    self.feedback_fn = feedback_fn
2239
    self.cluster_name = cluster_name
2240
    self.master_node = master_node
2241
    self.online_nodes = online_nodes
2242

    
2243
    self.ssh = ssh.SshRunner(self.cluster_name)
2244

    
2245
    self.nonmaster_nodes = [name for name in online_nodes
2246
                            if name != master_node]
2247

    
2248
    assert self.master_node not in self.nonmaster_nodes
2249

    
2250
  def _RunCmd(self, node_name, cmd):
2251
    """Runs a command on the local or a remote machine.
2252

2253
    @type node_name: string
2254
    @param node_name: Machine name
2255
    @type cmd: list
2256
    @param cmd: Command
2257

2258
    """
2259
    if node_name is None or node_name == self.master_node:
2260
      # No need to use SSH
2261
      result = utils.RunCmd(cmd)
2262
    else:
2263
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2264

    
2265
    if result.failed:
2266
      errmsg = ["Failed to run command %s" % result.cmd]
2267
      if node_name:
2268
        errmsg.append("on node %s" % node_name)
2269
      errmsg.append(": exitcode %s and error %s" %
2270
                    (result.exit_code, result.output))
2271
      raise errors.OpExecError(" ".join(errmsg))
2272

    
2273
  def Call(self, fn, *args):
2274
    """Call function while all daemons are stopped.
2275

2276
    @type fn: callable
2277
    @param fn: Function to be called
2278

2279
    """
2280
    # Pause watcher by acquiring an exclusive lock on watcher state file
2281
    self.feedback_fn("Blocking watcher")
2282
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2283
    try:
2284
      # TODO: Currently, this just blocks. There's no timeout.
2285
      # TODO: Should it be a shared lock?
2286
      watcher_block.Exclusive(blocking=True)
2287

    
2288
      # Stop master daemons, so that no new jobs can come in and all running
2289
      # ones are finished
2290
      self.feedback_fn("Stopping master daemons")
2291
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2292
      try:
2293
        # Stop daemons on all nodes
2294
        for node_name in self.online_nodes:
2295
          self.feedback_fn("Stopping daemons on %s" % node_name)
2296
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2297

    
2298
        # All daemons are shut down now
2299
        try:
2300
          return fn(self, *args)
2301
        except Exception, err:
2302
          _, errmsg = FormatError(err)
2303
          logging.exception("Caught exception")
2304
          self.feedback_fn(errmsg)
2305
          raise
2306
      finally:
2307
        # Start cluster again, master node last
2308
        for node_name in self.nonmaster_nodes + [self.master_node]:
2309
          self.feedback_fn("Starting daemons on %s" % node_name)
2310
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2311
    finally:
2312
      # Resume watcher
2313
      watcher_block.Close()
2314

    
2315

    
2316
def RunWhileClusterStopped(feedback_fn, fn, *args):
2317
  """Calls a function while all cluster daemons are stopped.
2318

2319
  @type feedback_fn: callable
2320
  @param feedback_fn: Feedback function
2321
  @type fn: callable
2322
  @param fn: Function to be called when daemons are stopped
2323

2324
  """
2325
  feedback_fn("Gathering cluster information")
2326

    
2327
  # This ensures we're running on the master daemon
2328
  cl = GetClient()
2329

    
2330
  (cluster_name, master_node) = \
2331
    cl.QueryConfigValues(["cluster_name", "master_node"])
2332

    
2333
  online_nodes = GetOnlineNodes([], cl=cl)
2334

    
2335
  # Don't keep a reference to the client. The master daemon will go away.
2336
  del cl
2337

    
2338
  assert master_node in online_nodes
2339

    
2340
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2341
                                       online_nodes).Call(fn, *args)
2342

    
2343

    
2344
def GenerateTable(headers, fields, separator, data,
2345
                  numfields=None, unitfields=None,
2346
                  units=None):
2347
  """Prints a table with headers and different fields.
2348

2349
  @type headers: dict
2350
  @param headers: dictionary mapping field names to headers for
2351
      the table
2352
  @type fields: list
2353
  @param fields: the field names corresponding to each row in
2354
      the data field
2355
  @param separator: the separator to be used; if this is None,
2356
      the default 'smart' algorithm is used which computes optimal
2357
      field width, otherwise just the separator is used between
2358
      each field
2359
  @type data: list
2360
  @param data: a list of lists, each sublist being one row to be output
2361
  @type numfields: list
2362
  @param numfields: a list with the fields that hold numeric
2363
      values and thus should be right-aligned
2364
  @type unitfields: list
2365
  @param unitfields: a list with the fields that hold numeric
2366
      values that should be formatted with the units field
2367
  @type units: string or None
2368
  @param units: the units we should use for formatting, or None for
2369
      automatic choice (human-readable for non-separator usage, otherwise
2370
      megabytes); this is a one-letter string
2371

2372
  """
2373
  if units is None:
2374
    if separator:
2375
      units = "m"
2376
    else:
2377
      units = "h"
2378

    
2379
  if numfields is None:
2380
    numfields = []
2381
  if unitfields is None:
2382
    unitfields = []
2383

    
2384
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2385
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2386

    
2387
  format_fields = []
2388
  for field in fields:
2389
    if headers and field not in headers:
2390
      # TODO: handle better unknown fields (either revert to old
2391
      # style of raising exception, or deal more intelligently with
2392
      # variable fields)
2393
      headers[field] = field
2394
    if separator is not None:
2395
      format_fields.append("%s")
2396
    elif numfields.Matches(field):
2397
      format_fields.append("%*s")
2398
    else:
2399
      format_fields.append("%-*s")
2400

    
2401
  if separator is None:
2402
    mlens = [0 for name in fields]
2403
    format_str = " ".join(format_fields)
2404
  else:
2405
    format_str = separator.replace("%", "%%").join(format_fields)
2406

    
2407
  for row in data:
2408
    if row is None:
2409
      continue
2410
    for idx, val in enumerate(row):
2411
      if unitfields.Matches(fields[idx]):
2412
        try:
2413
          val = int(val)
2414
        except (TypeError, ValueError):
2415
          pass
2416
        else:
2417
          val = row[idx] = utils.FormatUnit(val, units)
2418
      val = row[idx] = str(val)
2419
      if separator is None:
2420
        mlens[idx] = max(mlens[idx], len(val))
2421

    
2422
  result = []
2423
  if headers:
2424
    args = []
2425
    for idx, name in enumerate(fields):
2426
      hdr = headers[name]
2427
      if separator is None:
2428
        mlens[idx] = max(mlens[idx], len(hdr))
2429
        args.append(mlens[idx])
2430
      args.append(hdr)
2431
    result.append(format_str % tuple(args))
2432

    
2433
  if separator is None:
2434
    assert len(mlens) == len(fields)
2435

    
2436
    if fields and not numfields.Matches(fields[-1]):
2437
      mlens[-1] = 0
2438

    
2439
  for line in data:
2440
    args = []
2441
    if line is None:
2442
      line = ["-" for _ in fields]
2443
    for idx in range(len(fields)):
2444
      if separator is None:
2445
        args.append(mlens[idx])
2446
      args.append(line[idx])
2447
    result.append(format_str % tuple(args))
2448

    
2449
  return result
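# Hedged usage sketch (added for illustration; the field names and data are
# made up and _ExamplePrintTable is not part of the original module):
def _ExamplePrintTable():
  """Prints a two-column table with human-readable disk sizes."""
  headers = {"name": "Node", "dfree": "DiskFree"}
  data = [["node1.example.com", 102400], ["node2.example.com", 51200]]
  # separator=None selects the "smart" width algorithm; unitfields makes the
  # numeric column be formatted with utils.FormatUnit
  for line in GenerateTable(headers, ["name", "dfree"], None, data,
                            numfields=["dfree"], unitfields=["dfree"]):
    ToStdout(line)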
2450

    
2451

    
2452
def _FormatBool(value):
2453
  """Formats a boolean value as a string.
2454

2455
  """
2456
  if value:
2457
    return "Y"
2458
  return "N"
2459

    
2460

    
2461
#: Default formatting for query results; (callback, align right)
2462
_DEFAULT_FORMAT_QUERY = {
2463
  constants.QFT_TEXT: (str, False),
2464
  constants.QFT_BOOL: (_FormatBool, False),
2465
  constants.QFT_NUMBER: (str, True),
2466
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2467
  constants.QFT_OTHER: (str, False),
2468
  constants.QFT_UNKNOWN: (str, False),
2469
  }
2470

    
2471

    
2472
def _GetColumnFormatter(fdef, override, unit):
2473
  """Returns formatting function for a field.
2474

2475
  @type fdef: L{objects.QueryFieldDefinition}
2476
  @type override: dict
2477
  @param override: Dictionary for overriding field formatting functions,
2478
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2479
  @type unit: string
2480
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2481
  @rtype: tuple; (callable, bool)
2482
  @return: Returns the function to format a value (takes one parameter) and a
2483
    boolean for aligning the value on the right-hand side
2484

2485
  """
2486
  fmt = override.get(fdef.name, None)
2487
  if fmt is not None:
2488
    return fmt
2489

    
2490
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2491

    
2492
  if fdef.kind == constants.QFT_UNIT:
2493
    # Can't keep this information in the static dictionary
2494
    return (lambda value: utils.FormatUnit(value, unit), True)
2495

    
2496
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2497
  if fmt is not None:
2498
    return fmt
2499

    
2500
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2501

    
2502

    
2503
class _QueryColumnFormatter:
2504
  """Callable class for formatting fields of a query.
2505

2506
  """
2507
  def __init__(self, fn, status_fn, verbose):
2508
    """Initializes this class.
2509

2510
    @type fn: callable
2511
    @param fn: Formatting function
2512
    @type status_fn: callable
2513
    @param status_fn: Function to report fields' status
2514
    @type verbose: boolean
2515
    @param verbose: whether to use verbose field descriptions or not
2516

2517
    """
2518
    self._fn = fn
2519
    self._status_fn = status_fn
2520
    self._verbose = verbose
2521

    
2522
  def __call__(self, data):
2523
    """Returns a field's string representation.
2524

2525
    """
2526
    (status, value) = data
2527

    
2528
    # Report status
2529
    self._status_fn(status)
2530

    
2531
    if status == constants.RS_NORMAL:
2532
      return self._fn(value)
2533

    
2534
    assert value is None, \
2535
           "Found value %r for abnormal status %s" % (value, status)
2536

    
2537
    return FormatResultError(status, self._verbose)
2538

    
2539

    
2540
def FormatResultError(status, verbose):
2541
  """Formats result status other than L{constants.RS_NORMAL}.
2542

2543
  @param status: The result status
2544
  @type verbose: boolean
2545
  @param verbose: Whether to return the verbose text
2546
  @return: Text of result status
2547

2548
  """
2549
  assert status != constants.RS_NORMAL, \
2550
         "FormatResultError called with status equal to constants.RS_NORMAL"
2551
  try:
2552
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2553
  except KeyError:
2554
    raise NotImplementedError("Unknown status %s" % status)
2555
  else:
2556
    if verbose:
2557
      return verbose_text
2558
    return normal_text
2559

    
2560

    
2561
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2562
                      header=False, verbose=False):
2563
  """Formats data in L{objects.QueryResponse}.
2564

2565
  @type result: L{objects.QueryResponse}
2566
  @param result: result of query operation
2567
  @type unit: string
2568
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2569
    see L{utils.text.FormatUnit}
2570
  @type format_override: dict
2571
  @param format_override: Dictionary for overriding field formatting functions,
2572
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2573
  @type separator: string or None
2574
  @param separator: String used to separate fields
2575
  @type header: bool
2576
  @param header: Whether to output header row
2577
  @type verbose: boolean
2578
  @param verbose: whether to use verbose field descriptions or not
2579

2580
  """
2581
  if unit is None:
2582
    if separator:
2583
      unit = "m"
2584
    else:
2585
      unit = "h"
2586

    
2587
  if format_override is None:
2588
    format_override = {}
2589

    
2590
  stats = dict.fromkeys(constants.RS_ALL, 0)
2591

    
2592
  def _RecordStatus(status):
2593
    if status in stats:
2594
      stats[status] += 1
2595

    
2596
  columns = []
2597
  for fdef in result.fields:
2598
    assert fdef.title and fdef.name
2599
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2600
    columns.append(TableColumn(fdef.title,
2601
                               _QueryColumnFormatter(fn, _RecordStatus,
2602
                                                     verbose),
2603
                               align_right))
2604

    
2605
  table = FormatTable(result.data, columns, header, separator)
2606

    
2607
  # Collect statistics
2608
  assert len(stats) == len(constants.RS_ALL)
2609
  assert compat.all(count >= 0 for count in stats.values())
2610

    
2611
  # Determine overall status. If there was no data, unknown fields must be
2612
  # detected via the field definitions.
2613
  if (stats[constants.RS_UNKNOWN] or
2614
      (not result.data and _GetUnknownFields(result.fields))):
2615
    status = QR_UNKNOWN
2616
  elif compat.any(count > 0 for key, count in stats.items()
2617
                  if key != constants.RS_NORMAL):
2618
    status = QR_INCOMPLETE
2619
  else:
2620
    status = QR_NORMAL
2621

    
2622
  return (status, table)
2623

    
2624

    
2625
def _GetUnknownFields(fdefs):
2626
  """Returns list of unknown fields included in C{fdefs}.
2627

2628
  @type fdefs: list of L{objects.QueryFieldDefinition}
2629

2630
  """
2631
  return [fdef for fdef in fdefs
2632
          if fdef.kind == constants.QFT_UNKNOWN]
2633

    
2634

    
2635
def _WarnUnknownFields(fdefs):
2636
  """Prints a warning to stderr if a query included unknown fields.
2637

2638
  @type fdefs: list of L{objects.QueryFieldDefinition}
2639

2640
  """
2641
  unknown = _GetUnknownFields(fdefs)
2642
  if unknown:
2643
    ToStderr("Warning: Queried for unknown fields %s",
2644
             utils.CommaJoin(fdef.name for fdef in unknown))
2645
    return True
2646

    
2647
  return False
2648

    
2649

    
2650
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2651
                format_override=None, verbose=False, force_filter=False):
2652
  """Generic implementation for listing all items of a resource.
2653

2654
  @param resource: One of L{constants.QR_VIA_LUXI}
2655
  @type fields: list of strings
2656
  @param fields: List of fields to query for
2657
  @type names: list of strings
2658
  @param names: Names of items to query for
2659
  @type unit: string or None
2660
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2661
    None for automatic choice (human-readable for non-separator usage,
2662
    otherwise megabytes); this is a one-letter string
2663
  @type separator: string or None
2664
  @param separator: String used to separate fields
2665
  @type header: bool
2666
  @param header: Whether to show header row
2667
  @type force_filter: bool
2668
  @param force_filter: Whether to always treat names as filter
2669
  @type format_override: dict
2670
  @param format_override: Dictionary for overriding field formatting functions,
2671
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2672
  @type verbose: boolean
2673
  @param verbose: whether to use verbose field descriptions or not
2674

2675
  """
2676
  if cl is None:
2677
    cl = GetClient()
2678

    
2679
  if not names:
2680
    names = None
2681

    
2682
  filter_ = qlang.MakeFilter(names, force_filter)
2683

    
2684
  response = cl.Query(resource, fields, filter_)
2685

    
2686
  found_unknown = _WarnUnknownFields(response.fields)
2687

    
2688
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2689
                                     header=header,
2690
                                     format_override=format_override,
2691
                                     verbose=verbose)
2692

    
2693
  for line in data:
2694
    ToStdout(line)
2695

    
2696
  assert ((found_unknown and status == QR_UNKNOWN) or
2697
          (not found_unknown and status != QR_UNKNOWN))
2698

    
2699
  if status == QR_UNKNOWN:
2700
    return constants.EXIT_UNKNOWN_FIELD
2701

    
2702
  # TODO: Should the list command fail if not all data could be collected?
2703
  return constants.EXIT_SUCCESS
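# Hedged usage sketch (added for illustration; assumes constants.QR_NODE is a
# valid resource and "name"/"dfree" are valid node query fields):
#   GenericList(constants.QR_NODE, ["name", "dfree"], [], None, None, True)
# would list all nodes with a header row, using human-readable units.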
2704

    
2705

    
2706
def GenericListFields(resource, fields, separator, header, cl=None):
2707
  """Generic implementation for listing fields for a resource.
2708

2709
  @param resource: One of L{constants.QR_VIA_LUXI}
2710
  @type fields: list of strings
2711
  @param fields: List of fields to query for
2712
  @type separator: string or None
2713
  @param separator: String used to separate fields
2714
  @type header: bool
2715
  @param header: Whether to show header row
2716

2717
  """
2718
  if cl is None:
2719
    cl = GetClient()
2720

    
2721
  if not fields:
2722
    fields = None
2723

    
2724
  response = cl.QueryFields(resource, fields)
2725

    
2726
  found_unknown = _WarnUnknownFields(response.fields)
2727

    
2728
  columns = [
2729
    TableColumn("Name", str, False),
2730
    TableColumn("Title", str, False),
2731
    TableColumn("Description", str, False),
2732
    ]
2733

    
2734
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2735

    
2736
  for line in FormatTable(rows, columns, header, separator):
2737
    ToStdout(line)
2738

    
2739
  if found_unknown:
2740
    return constants.EXIT_UNKNOWN_FIELD
2741

    
2742
  return constants.EXIT_SUCCESS
2743

    
2744

    
2745
class TableColumn:
2746
  """Describes a column for L{FormatTable}.
2747

2748
  """
2749
  def __init__(self, title, fn, align_right):
2750
    """Initializes this class.
2751

2752
    @type title: string
2753
    @param title: Column title
2754
    @type fn: callable
2755
    @param fn: Formatting function
2756
    @type align_right: bool
2757
    @param align_right: Whether to align values on the right-hand side
2758

2759
    """
2760
    self.title = title
2761
    self.format = fn
2762
    self.align_right = align_right
2763

    
2764

    
2765
def _GetColFormatString(width, align_right):
2766
  """Returns the format string for a field.
2767

2768
  """
2769
  if align_right:
2770
    sign = ""
2771
  else:
2772
    sign = "-"
2773

    
2774
  return "%%%s%ss" % (sign, width)
2775

    
2776

    
2777
def FormatTable(rows, columns, header, separator):
2778
  """Formats data as a table.
2779

2780
  @type rows: list of lists
2781
  @param rows: Row data, one list per row
2782
  @type columns: list of L{TableColumn}
2783
  @param columns: Column descriptions
2784
  @type header: bool
2785
  @param header: Whether to show header row
2786
  @type separator: string or None
2787
  @param separator: String used to separate columns
2788

2789
  """
2790
  if header:
2791
    data = [[col.title for col in columns]]
2792
    colwidth = [len(col.title) for col in columns]
2793
  else:
2794
    data = []
2795
    colwidth = [0 for _ in columns]
2796

    
2797
  # Format row data
2798
  for row in rows:
2799
    assert len(row) == len(columns)
2800

    
2801
    formatted = [col.format(value) for value, col in zip(row, columns)]
2802

    
2803
    if separator is None:
2804
      # Update column widths
2805
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2806
        # Modifying a list's items while iterating is fine
2807
        colwidth[idx] = max(oldwidth, len(value))
2808

    
2809
    data.append(formatted)
2810

    
2811
  if separator is not None:
2812
    # Return early if a separator is used
2813
    return [separator.join(row) for row in data]
2814

    
2815
  if columns and not columns[-1].align_right:
2816
    # Avoid unnecessary spaces at end of line
2817
    colwidth[-1] = 0
2818

    
2819
  # Build format string
2820
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2821
                  for col, width in zip(columns, colwidth)])
2822

    
2823
  return [fmt % tuple(row) for row in data]
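# Hedged usage sketch (added for illustration; _ExampleFormatTable is a
# hypothetical helper, not part of the original module):
def _ExampleFormatTable():
  """Formats two rows with a left-aligned name and right-aligned count."""
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Count", str, True),
    ]
  rows = [["web1", 3], ["db1", 12]]
  # header=True adds the title row, separator=None pads columns with spaces
  return FormatTable(rows, columns, True, None)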
2824

    
2825

    
2826
def FormatTimestamp(ts):
2827
  """Formats a given timestamp.
2828

2829
  @type ts: timestamp
2830
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2831

2832
  @rtype: string
2833
  @return: a string with the formatted timestamp
2834

2835
  """
2836
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2837
    return "?"
2838
  sec, usec = ts
2839
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
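# Usage sketch (added for illustration; the exact output depends on the local
# timezone):
#   FormatTimestamp((1325376000, 123456)) -> e.g. "2012-01-01 00:00:00.123456"
#   FormatTimestamp("bogus")              -> "?"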
2840

    
2841

    
2842
def ParseTimespec(value):
2843
  """Parse a time specification.
2844

2845
  The following suffixes will be recognized:
2846

2847
    - s: seconds
2848
    - m: minutes
2849
    - h: hours
2850
    - d: days
2851
    - w: weeks
2852

2853
  Without any suffix, the value will be taken to be in seconds.
2854

2855
  """
2856
  value = str(value)
2857
  if not value:
2858
    raise errors.OpPrereqError("Empty time specification passed")
2859
  suffix_map = {
2860
    "s": 1,
2861
    "m": 60,
2862
    "h": 3600,
2863
    "d": 86400,
2864
    "w": 604800,
2865
    }
2866
  if value[-1] not in suffix_map:
2867
    try:
2868
      value = int(value)
2869
    except (TypeError, ValueError):
2870
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2871
  else:
2872
    multiplier = suffix_map[value[-1]]
2873
    value = value[:-1]
2874
    if not value: # no data left after stripping the suffix
2875
      raise errors.OpPrereqError("Invalid time specification (only"
2876
                                 " suffix passed)")
2877
    try:
2878
      value = int(value) * multiplier
2879
    except (TypeError, ValueError):
2880
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2881
  return value
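# Usage sketch (added for illustration):
#   ParseTimespec("30")  -> 30      (plain value, taken as seconds)
#   ParseTimespec("2h")  -> 7200
#   ParseTimespec("1w")  -> 604800
#   ParseTimespec("h")   -> raises OpPrereqError (suffix without a value)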
2882

    
2883

    
2884
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2885
                   filter_master=False, nodegroup=None):
2886
  """Returns the names of online nodes.
2887

2888
  This function will also log a warning on stderr with the names of
2889
  the offline nodes that are skipped.
2890

2891
  @param nodes: if not empty, use only this subset of nodes (minus the
2892
      offline ones)
2893
  @param cl: if not None, luxi client to use
2894
  @type nowarn: boolean
2895
  @param nowarn: by default, this function will output a note with the
2896
      offline nodes that are skipped; if this parameter is True the
2897
      note is not displayed
2898
  @type secondary_ips: boolean
2899
  @param secondary_ips: if True, return the secondary IPs instead of the
2900
      names, useful for doing network traffic over the replication interface
2901
      (if any)
2902
  @type filter_master: boolean
2903
  @param filter_master: if True, do not return the master node in the list
2904
      (useful in coordination with secondary_ips where we cannot check our
2905
      node name against the list)
2906
  @type nodegroup: string
2907
  @param nodegroup: If set, only return nodes in this node group
2908

2909
  """
2910
  if cl is None:
2911
    cl = GetClient()
2912

    
2913
  filter_ = []
2914

    
2915
  if nodes:
2916
    filter_.append(qlang.MakeSimpleFilter("name", nodes))
2917

    
2918
  if nodegroup is not None:
2919
    filter_.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
2920
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
2921

    
2922
  if filter_master:
2923
    filter_.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
2924

    
2925
  if filter_:
2926
    if len(filter_) > 1:
2927
      final_filter = [qlang.OP_AND] + filter_
2928
    else:
2929
      assert len(filter_) == 1
2930
      final_filter = filter_[0]
2931
  else:
2932
    final_filter = None
2933

    
2934
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
2935

    
2936
  def _IsOffline(row):
2937
    (_, (_, offline), _) = row
2938
    return offline
2939

    
2940
  def _GetName(row):
2941
    ((_, name), _, _) = row
2942
    return name
2943

    
2944
  def _GetSip(row):
2945
    (_, _, (_, sip)) = row
2946
    return sip
2947

    
2948
  (offline, online) = compat.partition(result.data, _IsOffline)
2949

    
2950
  if offline and not nowarn:
2951
    ToStderr("Note: skipping offline node(s): %s" %
2952
             utils.CommaJoin(map(_GetName, offline)))
2953

    
2954
  if secondary_ips:
2955
    fn = _GetSip
2956
  else:
2957
    fn = _GetName
2958

    
2959
  return map(fn, online)
2960

    
2961

    
2962
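# Illustrative use of GetOnlineNodes (not part of the original module);
# it assumes a reachable master daemon, and the variable names are
# hypothetical:
#
#   cl = GetClient()
#   names = GetOnlineNodes([], cl=cl)              # all online node names
#   repl_ips = GetOnlineNodes([], cl=cl, secondary_ips=True,
#                             filter_master=True)  # secondary IPs, no master

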
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


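# Illustrative use of ToStdout/ToStderr (not part of the original module);
# the printf-style arguments are interpolated only when extra args are given:
#
#   ToStdout("Submitted jobs %s", utils.CommaJoin(job_ids))
#   ToStderr("Note: skipping offline node(s): %s" % node_names)  # pre-formatted
#
# "job_ids" and "node_names" above are hypothetical variables.

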
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob does not return a (success, data) tuple, but it raises an
        # exception if the submission fails, so failures are still noticed.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


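# Illustrative sketch of typical JobExecutor use from a command function
# (not part of the original module); the opcode and variable names below
# are hypothetical:
#
#   jex = JobExecutor(cl=cl, opts=opts)
#   for name in instance_names:
#     jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
#   results = jex.GetResults()
#   bad_cnt = len([1 for (success, _) in results if not success])

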
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own (explicitly set) parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))


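# Illustrative example for FormatParameterDict (not part of the original
# module), using hypothetical hypervisor parameters:
#
#   buf = StringIO()
#   FormatParameterDict(buf, {"root_path": "/dev/xvda1"},
#                       {"root_path": "/dev/sda1",
#                        "kernel_path": "/boot/vmlinuz"})
#   # buf.getvalue() then contains (with a two-space indent):
#   #   - kernel_path: default (/boot/vmlinuz)
#   #   - root_path: /dev/xvda1

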
def ConfirmOperation(names, list_type, text, extra=""):
3181
  """Ask the user to confirm an operation on a list of list_type.
3182

3183
  This function is used to request confirmation for doing an operation
3184
  on a given list of list_type.
3185

3186
  @type names: list
3187
  @param names: the list of names that we display when
3188
      we ask for confirmation
3189
  @type list_type: str
3190
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3191
  @type text: str
3192
  @param text: the operation that the user should confirm
3193
  @rtype: boolean
3194
  @return: True or False depending on user's confirmation.
3195

3196
  """
3197
  count = len(names)
3198
  msg = ("The %s will operate on %d %s.\n%s"
3199
         "Do you want to continue?" % (text, count, list_type, extra))
3200
  affected = (("\nAffected %s:\n" % list_type) +
3201
              "\n".join(["  %s" % name for name in names]))
3202

    
3203
  choices = [("y", True, "Yes, execute the %s" % text),
3204
             ("n", False, "No, abort the %s" % text)]
3205

    
3206
  if count > 20:
3207
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3208
    question = msg
3209
  else:
3210
    question = msg + affected
3211

    
3212
  choice = AskUser(question, choices)
3213
  if choice == "v":
3214
    choices.pop(1)
3215
    choice = AskUser(msg + affected, choices)
3216
  return choice
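

# Illustrative use of ConfirmOperation (not part of the original module);
# "names" is a hypothetical list of instance names collected by the caller:
#
#   if not (opts.force or ConfirmOperation(names, "instances", "removal")):
#     return constants.EXIT_FAILURE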