
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
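
# Illustrative note (not part of the original module): a command's argument
# specification is simply a list of these objects, and only the last entry
# may be open-ended (max=None, see _CheckArguments below). For example, a
# command taking exactly one instance followed by any number of nodes could
# be declared as:
#   ARGS_ONE_INSTANCE + [ArgNode()]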


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
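
# Example (illustrative, names made up): with opts.tag_type set to
# constants.TAG_NODE and args == ["node1.example.com", "mytag"], the function
# pops the first argument and returns (constants.TAG_NODE, "node1.example.com"),
# leaving ["mytag"] in args for the actual tag operation.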


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
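
# Example (illustrative): this backs the "unit" option type used by options
# such as OS_SIZE_OPT below; a bare number is taken as MiB while suffixed
# values like "2G" are converted by utils.ParseUnit, and malformed input is
# reported back to optparse as an OptionValueError for the offending option.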


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
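
# Example (illustrative):
#   _SplitKeyVal("-H", "mem=512,auto_balance,no_acpi,-kernel_path")
#   -> {"mem": "512", "auto_balance": True, "acpi": False, "kernel_path": None}
# i.e. key=val pairs keep their string value, bare keys map to True,
# "no_"-prefixed keys map to False and "-"-prefixed keys map to None.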


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
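
# Example (illustrative): this backs the "identkeyval" option type used by
# DISK_OPT, NET_OPT and HYPERVISOR_OPT below. A value of "0:size=10G,mode=rw"
# parses to ("0", {"size": "10G", "mode": "rw"}), while a "no_"-prefixed
# ident such as "no_0" parses to ("0", False), which callers treat as the
# removal of that parameter group.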


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
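
# Illustrative note (hedged, not part of the original module): client scripts
# typically extend this list with their command-specific options, e.g. an
# "add instance"-style command might pass
#   COMMON_CREATE_OPTS + [OS_OPT, NO_INSTALL_OPT]
# as its parser option list.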


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
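
# Illustrative example of the "commands" dictionary format unpacked above:
# each entry maps a command name to a 5-tuple of handler, argument
# specification, extra options, usage string and help text, e.g.
#   commands = {
#     "info": (ShowInfo, ARGS_ONE_INSTANCE, [STATIC_OPT], "<instance>",
#              "Show instance information"),
#     }
# (the handler name here is made up; real client scripts define their own)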


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
1368

    
1369

    
1370
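# Illustrative sketch for _CheckArguments (not part of the original module);
# it assumes argument-definition objects exposing ``min`` and ``max``
# attributes, as the Arg* classes earlier in this module do. The _FakeArg
# class below is purely hypothetical:
#
#   >>> class _FakeArg:
#   ...   def __init__(self, min, max):
#   ...     self.min, self.max = min, max
#   >>> _CheckArguments("cmd", [_FakeArg(1, 1)], ["inst1.example.com"])
#   True
#   >>> _CheckArguments("cmd", [_FakeArg(1, 1)], [])   # too few values
#   False

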
def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)


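# Example for SplitNodeOption (illustrative, not part of the original module).
# Note that the two-node form returns a list (from str.split), while the
# single-node form returns a tuple:
#
#   >>> SplitNodeOption("node1.example.com:node2.example.com")
#   ['node1.example.com', 'node2.example.com']
#   >>> SplitNodeOption("node1.example.com")
#   ('node1.example.com', None)

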
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]


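# Example for CalculateOSNames (illustrative, not part of the original
# module); the OS and variant names are made up:
#
#   >>> CalculateOSNames("debootstrap", ["squeeze", "wheezy"])
#   ['debootstrap+squeeze', 'debootstrap+wheezy']
#   >>> CalculateOSNames("debootstrap", None)
#   ['debootstrap']

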
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")


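# Example for ParseFields (illustrative, not part of the original module);
# the field names are made up:
#
#   >>> ParseFields(None, ["name", "os"])
#   ['name', 'os']
#   >>> ParseFields("+status", ["name", "os"])
#   ['name', 'os', 'status']
#   >>> ParseFields("name,status", ["name", "os"])
#   ['name', 'status']

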
UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer


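# Illustrative use of AskUser (not part of the original module); the question
# text is hypothetical. Without a controlling tty the default (last) choice
# is returned, i.e. False for the default choices:
#
#   usertext = "This will reboot instance inst1.example.com. Continue?"
#   if not AskUser(usertext):
#     return constants.EXIT_FAILURE

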
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')


def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics


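# Example for ParseNicOption (illustrative, not part of the original module);
# the parameter values are hypothetical. Indices without an explicit dict are
# left as empty NIC specifications:
#
#   >>> ParseNicOption([("0", {"link": "br0"}), ("2", {"ip": "192.0.2.10"})])
#   [{'link': 'br0'}, {}, {'ip': '192.0.2.10'}]

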
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


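# Illustrative call of GenerateTable (not part of the original module); field
# and header names are made up. With separator=None the column widths are
# computed automatically and "dfree" is right-aligned and unit-formatted:
#
#   lines = GenerateTable(headers={"name": "Node", "dfree": "DFree"},
#                         fields=["name", "dfree"],
#                         separator=None,
#                         data=[["node1.example.com", 102400],
#                               ["node2.example.com", 2048]],
#                         numfields=["dfree"], unitfields=["dfree"])
#   for line in lines:
#     ToStdout(line)

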
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if cl is None:
    cl = GetClient()

  if not names:
    names = None

  if (force_filter or
      (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
    try:
      (filter_text, ) = names
    except ValueError:
      raise errors.OpPrereqError("Exactly one argument must be given as a"
                                 " filter")

    logging.debug("Parsing '%s' as filter", filter_text)
    filter_ = qlang.ParseFilter(filter_text)
  else:
    filter_ = qlang.MakeSimpleFilter("name", names)

  response = cl.Query(resource, fields, filter_)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


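# Example for _GetColFormatString (illustrative, not part of the original
# module):
#
#   >>> _GetColFormatString(10, False)
#   '%-10s'
#   >>> _GetColFormatString(10, True)
#   '%10s'

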
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec


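# Example for FormatTimestamp (illustrative, not part of the original module);
# the exact output depends on the local timezone (here assumed to be UTC):
#
#   >>> FormatTimestamp((1325376000, 123456))
#   '2012-01-01 00:00:00.123456'
#   >>> FormatTimestamp("not-a-timestamp")
#   '?'

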
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value


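# Example for ParseTimespec (illustrative, not part of the original module):
#
#   >>> ParseTimespec("30")
#   30
#   >>> ParseTimespec("2h")
#   7200
#   >>> ParseTimespec("1w")
#   604800

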
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
2928
    """
2929
    SetGenericOpcodeOpts(ops, self.opts)
2930
    self.queue.append((name, ops))
2931

    
2932
  def SubmitPending(self, each=False):
2933
    """Submit all pending jobs.
2934

2935
    """
2936
    if each:
2937
      results = []
2938
      for row in self.queue:
2939
        # SubmitJob will remove the success status, but raise an exception if
2940
        # the submission fails, so we'll notice that anyway.
2941
        results.append([True, self.cl.SubmitJob(row[1])])
2942
    else:
2943
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2944
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2945
                                                            self.queue)):
2946
      self.jobs.append((idx, status, data, name))
2947

    
2948
  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


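# Illustrative sketch (not part of the module): a typical caller queues one
# job per target and then waits for all of them. The instance names and the
# opcodes (op1, op2) below are hypothetical placeholders built elsewhere.
#
#   jex = JobExecutor(cl=GetClient(), verbose=True)
#   jex.QueueJob("instance1.example.com", op1)
#   jex.QueueJob("instance2.example.com", op2)
#   for success, result in jex.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)

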
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the parameters explicitly set on the object (without
      inherited defaults)
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))


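# Illustrative sketch (not part of the module), using hypothetical values:
# given param_dict = {"kernel_path": "/boot/custom"} and
# actual = {"kernel_path": "/boot/custom", "root_path": "/dev/vda1"},
# FormatParameterDict(buf, param_dict, actual) writes into buf (with the
# default level=1, i.e. a two-space indent):
#   - kernel_path: /boot/custom
#   - root_path: default (/dev/vda1)

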
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @type extra: str
  @param extra: additional text displayed before the confirmation question
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
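

# Illustrative sketch (not part of the module): a hypothetical caller asking
# for confirmation before rebooting two instances. With 20 names or fewer the
# affected list is appended to the question; for longer lists a "v" (view)
# choice is offered first.
#
#   if ConfirmOperation(["inst1", "inst2"], "instances", "reboot"):
#     ...  # submit the reboot jobs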