#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)

__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
232
UN_PREFIX = "-"
233

    
234
#: Priorities (sorted)
235
_PRIORITY_NAMES = [
236
  ("low", constants.OP_PRIO_LOW),
237
  ("normal", constants.OP_PRIO_NORMAL),
238
  ("high", constants.OP_PRIO_HIGH),
239
  ]
240

    
241
#: Priority dictionary for easier lookup
242
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
243
# we migrate to Python 2.6
244
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
245

    
246
# Query result status for clients
247
(QR_NORMAL,
248
 QR_UNKNOWN,
249
 QR_INCOMPLETE) = range(3)
250

    
251

    
252
class _Argument:
253
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
254
    self.min = min
255
    self.max = max
256

    
257
  def __repr__(self):
258
    return ("<%s min=%s max=%s>" %
259
            (self.__class__.__name__, self.min, self.max))
260

    
261

    
262
class ArgSuggest(_Argument):
263
  """Suggesting argument.
264

265
  Value can be any of the ones passed to the constructor.
266

267
  """
268
  # pylint: disable-msg=W0622
269
  def __init__(self, min=0, max=None, choices=None):
270
    _Argument.__init__(self, min=min, max=max)
271
    self.choices = choices
272

    
273
  def __repr__(self):
274
    return ("<%s min=%s max=%s choices=%r>" %
275
            (self.__class__.__name__, self.min, self.max, self.choices))
276

    
277

    
278
class ArgChoice(ArgSuggest):
279
  """Choice argument.
280

281
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
282
  but value must be one of the choices.
283

284
  """
285

    
286

    
287
class ArgUnknown(_Argument):
288
  """Unknown argument to program (e.g. determined at runtime).
289

290
  """
291

    
292

    
293
class ArgInstance(_Argument):
294
  """Instances argument.
295

296
  """
297

    
298

    
299
class ArgNode(_Argument):
300
  """Node argument.
301

302
  """
303

    
304

    
305
class ArgGroup(_Argument):
306
  """Node group argument.
307

308
  """
309

    
310

    
311
class ArgJobId(_Argument):
312
  """Job ID argument.
313

314
  """
315

    
316

    
317
class ArgFile(_Argument):
318
  """File path argument.
319

320
  """
321

    
322

    
323
class ArgCommand(_Argument):
324
  """Command argument.
325

326
  """
327

    
328

    
329
class ArgHost(_Argument):
330
  """Host argument.
331

332
  """
333

    
334

    
335
class ArgOs(_Argument):
336
  """OS argument.
337

338
  """
339

    
340

    
341
ARGS_NONE = []
342
ARGS_MANY_INSTANCES = [ArgInstance()]
343
ARGS_MANY_NODES = [ArgNode()]
344
ARGS_MANY_GROUPS = [ArgGroup()]
345
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
346
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
347
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
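
# Illustrative example, not part of the original module: when opts.tag_type is
# constants.TAG_NODE and args is ["node1.example.com", "mytag"], this returns
# (TAG_NODE, "node1.example.com") and leaves ["mytag"] in args; for
# constants.TAG_CLUSTER no name is consumed and (kind, kind) is returned.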


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))

def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key: val, key: val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
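
# Illustrative example, not part of the original module: with the rules above,
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-kernel_path")
# returns {"memory": "512", "auto_balance": False, "kernel_path": None}
# (the key names here are only placeholders).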


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
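
# Illustrative example, not part of the original module: for an option of type
# "identkeyval",
#   check_ident_key_val(None, "-H", "kvm:kernel_path=/boot/vmlinuz,no_acpi")
# returns ("kvm", {"kernel_path": "/boot/vmlinuz", "acpi": False}), while
#   check_ident_key_val(None, "-H", "no_kvm")
# returns ("kvm", False), the form used for removing a parameter group.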


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)

# completion_suggest is normally a list. Numeric values, which do not evaluate
# to False, are used to mark options for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
644
                         action="store_true",
645
                         help=("Do not execute the operation, just run the"
646
                               " check steps and verify it it could be"
647
                               " executed"))
648

    
649
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
650
                         action="store_true",
651
                         help="Increase the verbosity of the operation")
652

    
653
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
654
                              action="store_true", dest="simulate_errors",
655
                              help="Debugging option that makes the operation"
656
                              " treat most runtime checks as failed")
657

    
658
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
659
                        default=True, action="store_false",
660
                        help="Don't wait for sync (DANGEROUS!)")
661

    
662
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
663
                               help="Custom disk setup (diskless, file,"
664
                               " plain or drbd)",
665
                               default=None, metavar="TEMPL",
666
                               choices=list(constants.DISK_TEMPLATES))
667

    
668
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
669
                        help="Do not create any network cards for"
670
                        " the instance")
671

    
672
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
673
                               help="Relative path under default cluster-wide"
674
                               " file storage dir to store file-based disks",
675
                               default=None, metavar="<DIR>")
676

    
677
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
678
                                  help="Driver to use for image files",
679
                                  default="loop", metavar="<DRIVER>",
680
                                  choices=list(constants.FILE_DRIVER))
681

    
682
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
683
                            help="Select nodes for the instance automatically"
684
                            " using the <NAME> iallocator plugin",
685
                            default=None, type="string",
686
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
687

    
688
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
689
                            metavar="<NAME>",
690
                            help="Set the default instance allocator plugin",
691
                            default=None, type="string",
692
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
693

    
694
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
695
                    metavar="<os>",
696
                    completion_suggest=OPT_COMPL_ONE_OS)
697

    
698
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
699
                         type="keyval", default={},
700
                         help="OS parameters")
701

    
702
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
703
                               action="store_true", default=False,
704
                               help="Force an unknown variant")
705

    
706
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
707
                            action="store_true", default=False,
708
                            help="Do not install the OS (will"
709
                            " enable no-start)")
710

    
711
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
712
                         type="keyval", default={},
713
                         help="Backend parameters")
714

    
715
HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
716
                         default={}, dest="hvparams",
717
                         help="Hypervisor parameters")
718

    
719
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
720
                            help="Hypervisor and hypervisor options, in the"
721
                            " format hypervisor:option=value,option=value,...",
722
                            default=None, type="identkeyval")
723

    
724
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
725
                        help="Hypervisor and hypervisor options, in the"
726
                        " format hypervisor:option=value,option=value,...",
727
                        default=[], action="append", type="identkeyval")
728

    
729
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
730
                           action="store_false",
731
                           help="Don't check that the instance's IP"
732
                           " is alive")
733

    
734
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
735
                             default=True, action="store_false",
736
                             help="Don't check that the instance's name"
737
                             " is resolvable")
738

    
739
NET_OPT = cli_option("--net",
740
                     help="NIC parameters", default=[],
741
                     dest="nics", action="append", type="identkeyval")
742

    
743
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
744
                      dest="disks", action="append", type="identkeyval")
745

    
746
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
747
                         help="Comma-separated list of disks"
748
                         " indices to act on (e.g. 0,2) (optional,"
749
                         " defaults to all disks)")
750

    
751
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
752
                         help="Enforces a single-disk configuration using the"
753
                         " given disk size, in MiB unless a suffix is used",
754
                         default=None, type="unit", metavar="<size>")
755

    
756
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
757
                                dest="ignore_consistency",
758
                                action="store_true", default=False,
759
                                help="Ignore the consistency of the disks on"
760
                                " the secondary")
761

    
762
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
763
                                dest="allow_failover",
764
                                action="store_true", default=False,
765
                                help="If migration is not possible fallback to"
766
                                     " failover")
767

    
768
NONLIVE_OPT = cli_option("--non-live", dest="live",
769
                         default=True, action="store_false",
770
                         help="Do a non-live migration (this usually means"
771
                         " freeze the instance, save the state, transfer and"
772
                         " only then resume running on the secondary node)")
773

    
774
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
775
                                default=None,
776
                                choices=list(constants.HT_MIGRATION_MODES),
777
                                help="Override default migration mode (choose"
778
                                " either live or non-live")
779

    
780
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
781
                                help="Target node and optional secondary node",
782
                                metavar="<pnode>[:<snode>]",
783
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
784

    
785
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
786
                           action="append", metavar="<node>",
787
                           help="Use only this node (can be used multiple"
788
                           " times, if not given defaults to all nodes)",
789
                           completion_suggest=OPT_COMPL_ONE_NODE)
790

    
791
NODEGROUP_OPT = cli_option("-g", "--node-group",
792
                           dest="nodegroup",
793
                           help="Node group (name or uuid)",
794
                           metavar="<nodegroup>",
795
                           default=None, type="string",
796
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
797

    
798
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
799
                             metavar="<node>",
800
                             completion_suggest=OPT_COMPL_ONE_NODE)
801

    
802
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
803
                         action="store_false",
804
                         help="Don't start the instance after creation")
805

    
806
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
807
                         action="store_true", default=False,
808
                         help="Show command instead of executing it")
809

    
810
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
811
                         default=False, action="store_true",
812
                         help="Instead of performing the migration, try to"
813
                         " recover from a failed cleanup. This is safe"
814
                         " to run even if the instance is healthy, but it"
815
                         " will create extra replication traffic and "
816
                         " disrupt briefly the replication (like during the"
817
                         " migration")
818

    
819
STATIC_OPT = cli_option("-s", "--static", dest="static",
820
                        action="store_true", default=False,
821
                        help="Only show configuration data, not runtime data")
822

    
823
ALL_OPT = cli_option("--all", dest="show_all",
824
                     default=False, action="store_true",
825
                     help="Show info on all instances on the cluster."
826
                     " This can take a long time to run, use wisely")
827

    
828
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
829
                           action="store_true", default=False,
830
                           help="Interactive OS reinstall, lists available"
831
                           " OS templates for selection")
832

    
833
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
834
                                 action="store_true", default=False,
835
                                 help="Remove the instance from the cluster"
836
                                 " configuration even if there are failures"
837
                                 " during the removal process")
838

    
839
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
840
                                        dest="ignore_remove_failures",
841
                                        action="store_true", default=False,
842
                                        help="Remove the instance from the"
843
                                        " cluster configuration even if there"
844
                                        " are failures during the removal"
845
                                        " process")
846

    
847
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
848
                                 action="store_true", default=False,
849
                                 help="Remove the instance from the cluster")
850

    
851
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
852
                               help="Specifies the new node for the instance",
853
                               metavar="NODE", default=None,
854
                               completion_suggest=OPT_COMPL_ONE_NODE)
855

    
856
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
857
                               help="Specifies the new secondary node",
858
                               metavar="NODE", default=None,
859
                               completion_suggest=OPT_COMPL_ONE_NODE)
860

    
861
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
862
                            default=False, action="store_true",
863
                            help="Replace the disk(s) on the primary"
864
                            " node (only for the drbd template)")
865

    
866
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
867
                              default=False, action="store_true",
868
                              help="Replace the disk(s) on the secondary"
869
                              " node (only for the drbd template)")
870

    
871
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
872
                              default=False, action="store_true",
873
                              help="Lock all nodes and auto-promote as needed"
874
                              " to MC status")
875

    
876
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
877
                              default=False, action="store_true",
878
                              help="Automatically replace faulty disks"
879
                              " (only for the drbd template)")
880

    
881
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
882
                             default=False, action="store_true",
883
                             help="Ignore current recorded size"
884
                             " (useful for forcing activation when"
885
                             " the recorded size is wrong)")
886

    
887
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
888
                          metavar="<node>",
889
                          completion_suggest=OPT_COMPL_ONE_NODE)
890

    
891
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
892
                         metavar="<dir>")
893

    
894
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
895
                              help="Specify the secondary ip for the node",
896
                              metavar="ADDRESS", default=None)
897

    
898
READD_OPT = cli_option("--readd", dest="readd",
899
                       default=False, action="store_true",
900
                       help="Readd old node after replacing it")
901

    
902
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
903
                                default=True, action="store_false",
904
                                help="Disable SSH key fingerprint checking")
905

    
906
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
907
                                 default=False, action="store_true",
908
                                 help="Force the joining of a node")
909

    
910
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
911
                    type="bool", default=None, metavar=_YORNO,
912
                    help="Set the master_candidate flag on the node")
913

    
914
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
915
                         type="bool", default=None,
916
                         help=("Set the offline flag on the node"
917
                               " (cluster does not communicate with offline"
918
                               " nodes)"))
919

    
920
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
921
                         type="bool", default=None,
922
                         help=("Set the drained flag on the node"
923
                               " (excluded from allocation operations)"))
924

    
925
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
926
                    type="bool", default=None, metavar=_YORNO,
927
                    help="Set the master_capable flag on the node")
928

    
929
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
930
                    type="bool", default=None, metavar=_YORNO,
931
                    help="Set the vm_capable flag on the node")
932

    
933
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
934
                             type="bool", default=None, metavar=_YORNO,
935
                             help="Set the allocatable flag on a volume")
936

    
937
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
938
                               help="Disable support for lvm based instances"
939
                               " (cluster-wide)",
940
                               action="store_false", default=True)
941

    
942
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
943
                            dest="enabled_hypervisors",
944
                            help="Comma-separated list of hypervisors",
945
                            type="string", default=None)
946

    
947
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
948
                            type="keyval", default={},
949
                            help="NIC parameters")
950

    
951
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
952
                         dest="candidate_pool_size", type="int",
953
                         help="Set the candidate pool size")
954

    
955
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
956
                         help=("Enables LVM and specifies the volume group"
957
                               " name (cluster-wide) for disk allocation"
958
                               " [%s]" % constants.DEFAULT_VG),
959
                         metavar="VG", default=None)
960

    
961
YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
962
                          help="Destroy cluster", action="store_true")
963

    
964
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
965
                          help="Skip node agreement check (dangerous)",
966
                          action="store_true", default=False)
967

    
968
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
969
                            help="Specify the mac prefix for the instance IP"
970
                            " addresses, in the format XX:XX:XX",
971
                            metavar="PREFIX",
972
                            default=None)
973

    
974
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
975
                               help="Specify the node interface (cluster-wide)"
976
                               " on which the master IP address will be added"
977
                               " (cluster init default: %s)" %
978
                               constants.DEFAULT_BRIDGE,
979
                               metavar="NETDEV",
980
                               default=None)
981

    
982
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
983
                                help="Specify the default directory (cluster-"
984
                                "wide) for storing the file-based disks [%s]" %
985
                                constants.DEFAULT_FILE_STORAGE_DIR,
986
                                metavar="DIR",
987
                                default=constants.DEFAULT_FILE_STORAGE_DIR)
988

    
989
GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
990
                            dest="shared_file_storage_dir",
991
                            help="Specify the default directory (cluster-"
992
                            "wide) for storing the shared file-based"
993
                            " disks [%s]" %
994
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
995
                            metavar="SHAREDDIR",
996
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
997

    
998
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
999
                                   help="Don't modify /etc/hosts",
1000
                                   action="store_false", default=True)
1001

    
1002
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1003
                                    help="Don't initialize SSH keys",
1004
                                    action="store_false", default=True)
1005

    
1006
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1007
                             help="Enable parseable error messages",
1008
                             action="store_true", default=False)
1009

    
1010
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1011
                          help="Skip N+1 memory redundancy tests",
1012
                          action="store_true", default=False)
1013

    
1014
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1015
                             help="Type of reboot: soft/hard/full",
1016
                             default=constants.INSTANCE_REBOOT_HARD,
1017
                             metavar="<REBOOT>",
1018
                             choices=list(constants.REBOOT_TYPES))
1019

    
1020
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1021
                                    dest="ignore_secondaries",
1022
                                    default=False, action="store_true",
1023
                                    help="Ignore errors from secondaries")
1024

    
1025
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1026
                            action="store_false", default=True,
1027
                            help="Don't shutdown the instance (unsafe)")
1028

    
1029
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1030
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1031
                         help="Maximum time to wait")
1032

    
1033
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1034
                         dest="shutdown_timeout", type="int",
1035
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1036
                         help="Maximum time to wait for instance shutdown")
1037

    
1038
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1039
                          default=None,
1040
                          help=("Number of seconds between repetions of the"
1041
                                " command"))
1042

    
1043
EARLY_RELEASE_OPT = cli_option("--early-release",
1044
                               dest="early_release", default=False,
1045
                               action="store_true",
1046
                               help="Release the locks on the secondary"
1047
                               " node(s) early")
1048

    
1049
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1050
                                  dest="new_cluster_cert",
1051
                                  default=False, action="store_true",
1052
                                  help="Generate a new cluster certificate")
1053

    
1054
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1055
                           default=None,
1056
                           help="File containing new RAPI certificate")
1057

    
1058
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1059
                               default=None, action="store_true",
1060
                               help=("Generate a new self-signed RAPI"
1061
                                     " certificate"))
1062

    
1063
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1064
                                    dest="new_confd_hmac_key",
1065
                                    default=False, action="store_true",
1066
                                    help=("Create a new HMAC key for %s" %
1067
                                          constants.CONFD))
1068

    
1069
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1070
                                       dest="cluster_domain_secret",
1071
                                       default=None,
1072
                                       help=("Load new new cluster domain"
1073
                                             " secret from file"))
1074

    
1075
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1076
                                           dest="new_cluster_domain_secret",
1077
                                           default=False, action="store_true",
1078
                                           help=("Create a new cluster domain"
1079
                                                 " secret"))
1080

    
1081
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1082
                              dest="use_replication_network",
1083
                              help="Whether to use the replication network"
1084
                              " for talking to the nodes",
1085
                              action="store_true", default=False)
1086

    
1087
MAINTAIN_NODE_HEALTH_OPT = \
1088
    cli_option("--maintain-node-health", dest="maintain_node_health",
1089
               metavar=_YORNO, default=None, type="bool",
1090
               help="Configure the cluster to automatically maintain node"
1091
               " health, by shutting down unknown instances, shutting down"
1092
               " unknown DRBD devices, etc.")
1093

    
1094
IDENTIFY_DEFAULTS_OPT = \
1095
    cli_option("--identify-defaults", dest="identify_defaults",
1096
               default=False, action="store_true",
1097
               help="Identify which saved instance parameters are equal to"
1098
               " the current cluster defaults and set them as such, instead"
1099
               " of marking them as overridden")
1100

    
1101
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1102
                         action="store", dest="uid_pool",
1103
                         help=("A list of user-ids or user-id"
1104
                               " ranges separated by commas"))
1105

    
1106
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1107
                          action="store", dest="add_uids",
1108
                          help=("A list of user-ids or user-id"
1109
                                " ranges separated by commas, to be"
1110
                                " added to the user-id pool"))
1111

    
1112
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1113
                             action="store", dest="remove_uids",
1114
                             help=("A list of user-ids or user-id"
1115
                                   " ranges separated by commas, to be"
1116
                                   " removed from the user-id pool"))
1117

    
1118
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1119
                             action="store", dest="reserved_lvs",
1120
                             help=("A comma-separated list of reserved"
1121
                                   " logical volumes names, that will be"
1122
                                   " ignored by cluster verify"))
1123

    
1124
ROMAN_OPT = cli_option("--roman",
1125
                       dest="roman_integers", default=False,
1126
                       action="store_true",
1127
                       help="Use roman numbers for positive integers")
1128

    
1129
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1130
                             action="store", default=None,
1131
                             help="Specifies usermode helper for DRBD")
1132

    
1133
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1134
                                action="store_false", default=True,
1135
                                help="Disable support for DRBD")
1136

    
1137
PRIMARY_IP_VERSION_OPT = \
1138
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1139
               action="store", dest="primary_ip_version",
1140
               metavar="%d|%d" % (constants.IP4_VERSION,
1141
                                  constants.IP6_VERSION),
1142
               help="Cluster-wide IP version for primary IP")
1143

    
1144
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1145
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1146
                          choices=_PRIONAME_TO_VALUE.keys(),
1147
                          help="Priority for opcode processing")
1148

    
1149
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1150
                        type="bool", default=None, metavar=_YORNO,
1151
                        help="Sets the hidden flag on the OS")
1152

    
1153
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1154
                        type="bool", default=None, metavar=_YORNO,
1155
                        help="Sets the blacklisted flag on the OS")
1156

    
1157
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1158
                                     type="bool", metavar=_YORNO,
1159
                                     dest="prealloc_wipe_disks",
1160
                                     help=("Wipe disks prior to instance"
1161
                                           " creation"))
1162

    
1163
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1164
                             type="keyval", default=None,
1165
                             help="Node parameters")
1166

    
1167
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1168
                              action="store", metavar="POLICY", default=None,
1169
                              help="Allocation policy for the node group")
1170

    
1171
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1172
                              type="bool", metavar=_YORNO,
1173
                              dest="node_powered",
1174
                              help="Specify if the SoR for node is powered")
1175

    
1176
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1177
                         default=constants.OOB_TIMEOUT,
1178
                         help="Maximum time to wait for out-of-band helper")
1179

    
1180
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1181
                             default=constants.OOB_POWER_DELAY,
1182
                             help="Time in seconds to wait between power-ons")
1183

    
1184

    
1185
#: Options provided by all commands
1186
COMMON_OPTS = [DEBUG_OPT]
1187

    
1188
# common options for creating instances. add and import then add their own
1189
# specific ones.
1190
COMMON_CREATE_OPTS = [
1191
  BACKEND_OPT,
1192
  DISK_OPT,
1193
  DISK_TEMPLATE_OPT,
1194
  FILESTORE_DIR_OPT,
1195
  FILESTORE_DRIVER_OPT,
1196
  HYPERVISOR_OPT,
1197
  IALLOCATOR_OPT,
1198
  NET_OPT,
1199
  NODE_PLACEMENT_OPT,
1200
  NOIPCHECK_OPT,
1201
  NONAMECHECK_OPT,
1202
  NONICS_OPT,
1203
  NWSYNC_OPT,
1204
  OSPARAMS_OPT,
1205
  OS_SIZE_OPT,
1206
  SUBMIT_OPT,
1207
  DRY_RUN_OPT,
1208
  PRIORITY_OPT,
1209
  ]
1210

    
1211

    
1212
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
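
# Illustrative sketch, not part of the original module: each value in the
# "commands" dictionary unpacked above is a 5-tuple of
# (handler function, argument definition, option list, usage, description),
# e.g.
#   commands = {
#     "info": (ShowInfo, ARGS_ONE_INSTANCE, [STATIC_OPT], "<instance>",
#              "Show instance information"),
#     }
#   aliases = {"show": "info"}
# where ShowInfo is a hypothetical handler function.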


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
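
# Illustrative example, not part of the original module: a command defined
# with ARGS_ONE_INSTANCE (min=1, max=1) called without arguments prints
# "Error: Command ... expects 1 argument(s)" and returns False, while one
# defined with ARGS_MANY_NODES (min=0, max=None) accepts any number of
# arguments.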


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
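
# Illustrative example, not part of the original module:
#   SplitNodeOption("node1.example.com:node2.example.com")
# gives ["node1.example.com", "node2.example.com"], while
#   SplitNodeOption("node1.example.com") gives ("node1.example.com", None).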


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
1387

    
1388

    
1389
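# Editor's note: an illustrative sketch of CalculateOSNames (the OS name and
# variant names are made-up values):
#
#   CalculateOSNames("debootstrap", ["wheezy", "squeeze"])
#       -> ['debootstrap+wheezy', 'debootstrap+squeeze']
#   CalculateOSNames("debootstrap", None)
#       -> ['debootstrap']

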
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")


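# Editor's note: an illustrative sketch of ParseFields with made-up field
# names; a leading "+" extends the defaults instead of replacing them:
#
#   ParseFields(None, ["name", "os"])          -> ['name', 'os']
#   ParseFields("+be/memory", ["name", "os"])  -> ['name', 'os', 'be/memory']
#   ParseFields("name,ip", ["name", "os"])     -> ['name', 'ip']

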
UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')


def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics


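# Editor's note: an illustrative sketch of ParseNicOption (the NIC parameters
# are made-up values and must still satisfy constants.INIC_PARAMS_TYPES).
# Indices that are not mentioned are left as empty dicts, i.e. auto-configured:
#
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#       -> [{'link': 'br0'}, {}, {'mode': 'routed'}]
#
# The unassigned slot keeps the shared empty dict created by "[{}] * nic_max",
# which is harmless here because it is never mutated afterwards.

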
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if cl is None:
    cl = GetClient()

  if not names:
    names = None

  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


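# Editor's note: a small illustration of _GetColFormatString; the width below
# is arbitrary:
#
#   _GetColFormatString(8, False) -> '%-8s'   ('%-8s' % "abc" == 'abc     ')
#   _GetColFormatString(8, True)  -> '%8s'    ('%8s' % "abc"  == '     abc')

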
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec


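# Editor's note: FormatTimestamp expects a (seconds, microseconds) pair such
# as the ones stored in job records; anything else yields '?'.  The exact
# output depends on the local timezone, e.g.:
#
#   FormatTimestamp((1325376000, 123456)) -> '2012-01-01 00:00:00.123456'
#                                            (in UTC; shifted elsewhere)
#   FormatTimestamp("not-a-timestamp")    -> '?'

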
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value


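# Editor's note: a few illustrative ParseTimespec conversions (all results are
# in seconds):
#
#   ParseTimespec("30")  -> 30
#   ParseTimespec("2h")  -> 7200
#   ParseTimespec("1w")  -> 604800
#   ParseTimespec("h")   raises errors.OpPrereqError (only a suffix given)

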
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]


def _ToStream(stream, txt, *args):
2839
  """Write a message to a stream, bypassing the logging system
2840

2841
  @type stream: file object
2842
  @param stream: the file to which we should write
2843
  @type txt: str
2844
  @param txt: the message
2845

2846
  """
2847
  if args:
2848
    args = tuple(args)
2849
    stream.write(txt % args)
2850
  else:
2851
    stream.write(txt)
2852
  stream.write('\n')
2853
  stream.flush()
2854

    
2855

    
2856
def ToStdout(txt, *args):
2857
  """Write a message to stdout only, bypassing the logging system
2858

2859
  This is just a wrapper over _ToStream.
2860

2861
  @type txt: str
2862
  @param txt: the message
2863

2864
  """
2865
  _ToStream(sys.stdout, txt, *args)
2866

    
2867

    
2868
def ToStderr(txt, *args):
2869
  """Write a message to stderr only, bypassing the logging system
2870

2871
  This is just a wrapper over _ToStream.
2872

2873
  @type txt: str
2874
  @param txt: the message
2875

2876
  """
2877
  _ToStream(sys.stderr, txt, *args)
2878

    
2879

    
2880
class JobExecutor(object):
2881
  """Class which manages the submission and execution of multiple jobs.
2882

2883
  Note that instances of this class should not be reused between
2884
  GetResults() calls.
2885

2886
  """
2887
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2888
    self.queue = []
2889
    if cl is None:
2890
      cl = GetClient()
2891
    self.cl = cl
2892
    self.verbose = verbose
2893
    self.jobs = []
2894
    self.opts = opts
2895
    self.feedback_fn = feedback_fn
2896

    
2897
  def QueueJob(self, name, *ops):
2898
    """Record a job for later submit.
2899

2900
    @type name: string
2901
    @param name: a description of the job, will be used in WaitJobSet
2902
    """
2903
    SetGenericOpcodeOpts(ops, self.opts)
2904
    self.queue.append((name, ops))
2905

    
2906
  def SubmitPending(self, each=False):
2907
    """Submit all pending jobs.
2908

2909
    """
2910
    if each:
2911
      results = []
2912
      for row in self.queue:
2913
        # SubmitJob will remove the success status, but raise an exception if
2914
        # the submission fails, so we'll notice that anyway.
2915
        results.append([True, self.cl.SubmitJob(row[1])])
2916
    else:
2917
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2918
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2919
                                                            self.queue)):
2920
      self.jobs.append((idx, status, data, name))
2921

    
2922
  def _ChooseJob(self):
2923
    """Choose a non-waiting/queued job to poll next.
2924

2925
    """
2926
    assert self.jobs, "_ChooseJob called with empty job list"
2927

    
2928
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2929
    assert result
2930

    
2931
    for job_data, status in zip(self.jobs, result):
2932
      if (isinstance(status, list) and status and
2933
          status[0] in (constants.JOB_STATUS_QUEUED,
2934
                        constants.JOB_STATUS_WAITLOCK,
2935
                        constants.JOB_STATUS_CANCELING)):
2936
        # job is still present and waiting
2937
        continue
2938
      # good candidate found (either running job or lost job)
2939
      self.jobs.remove(job_data)
2940
      return job_data
2941

    
2942
    # no job found
2943
    return self.jobs.pop(0)
2944

    
2945
  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


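# Example use of JobExecutor (an illustrative sketch; "op_list" is a
# hypothetical list of (name, opcode) pairs built by the calling command,
# and "opts" the parsed command-line options):
#
#   jex = JobExecutor(opts=opts)
#   for (name, op) in op_list:
#     jex.QueueJob(name, op)
#   # GetResults() returns [(success, result_or_error), ...] in queue order
#   results = jex.GetResults()
#   failed = [res for (ok, res) in results if not ok]

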
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the explicitly-set ("own") parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: level of indentation

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))


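# Example of FormatParameterDict output (illustrative values): explicitly-set
# parameters are printed as-is, inherited ones are marked as defaults.
#
#   buf = StringIO()
#   FormatParameterDict(buf, {"mem": 512}, {"mem": 512, "vcpus": 1})
#   # buf.getvalue() now contains:
#   #   "  - mem: 512\n"
#   #   "  - vcpus: default (1)\n"

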
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
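

# Example use of ConfirmOperation (illustrative; the instance names are made
# up).  For more than 20 names the full list is initially hidden behind the
# "v" (view) choice:
#
#   names = ["inst1.example.com", "inst2.example.com"]
#   if not ConfirmOperation(names, "instances", "shutdown"):
#     return 1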