Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ fcecea0b

History | View | Annotate | Download (97.8 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
from cStringIO import StringIO
31

    
32
from ganeti import utils
33
from ganeti import errors
34
from ganeti import constants
35
from ganeti import opcodes
36
from ganeti import luxi
37
from ganeti import ssconf
38
from ganeti import rpc
39
from ganeti import ssh
40
from ganeti import compat
41
from ganeti import netutils
42
from ganeti import qlang
43

    
44
from optparse import (OptionParser, TitledHelpFormatter,
45
                      Option, OptionValueError)
46

    
47

    
48
__all__ = [
49
  # Command line options
50
  "ADD_UIDS_OPT",
51
  "ALLOCATABLE_OPT",
52
  "ALLOC_POLICY_OPT",
53
  "ALL_OPT",
54
  "AUTO_PROMOTE_OPT",
55
  "AUTO_REPLACE_OPT",
56
  "BACKEND_OPT",
57
  "BLK_OS_OPT",
58
  "CAPAB_MASTER_OPT",
59
  "CAPAB_VM_OPT",
60
  "CLEANUP_OPT",
61
  "CLUSTER_DOMAIN_SECRET_OPT",
62
  "CONFIRM_OPT",
63
  "CP_SIZE_OPT",
64
  "DEBUG_OPT",
65
  "DEBUG_SIMERR_OPT",
66
  "DISKIDX_OPT",
67
  "DISK_OPT",
68
  "DISK_TEMPLATE_OPT",
69
  "DRAINED_OPT",
70
  "DRY_RUN_OPT",
71
  "DRBD_HELPER_OPT",
72
  "DST_NODE_OPT",
73
  "EARLY_RELEASE_OPT",
74
  "ENABLED_HV_OPT",
75
  "ERROR_CODES_OPT",
76
  "FIELDS_OPT",
77
  "FILESTORE_DIR_OPT",
78
  "FILESTORE_DRIVER_OPT",
79
  "FORCE_OPT",
80
  "FORCE_VARIANT_OPT",
81
  "GLOBAL_FILEDIR_OPT",
82
  "HID_OS_OPT",
83
  "GLOBAL_SHARED_FILEDIR_OPT",
84
  "HVLIST_OPT",
85
  "HVOPTS_OPT",
86
  "HYPERVISOR_OPT",
87
  "IALLOCATOR_OPT",
88
  "DEFAULT_IALLOCATOR_OPT",
89
  "IDENTIFY_DEFAULTS_OPT",
90
  "IGNORE_CONSIST_OPT",
91
  "IGNORE_FAILURES_OPT",
92
  "IGNORE_OFFLINE_OPT",
93
  "IGNORE_REMOVE_FAILURES_OPT",
94
  "IGNORE_SECONDARIES_OPT",
95
  "IGNORE_SIZE_OPT",
96
  "INTERVAL_OPT",
97
  "MAC_PREFIX_OPT",
98
  "MAINTAIN_NODE_HEALTH_OPT",
99
  "MASTER_NETDEV_OPT",
100
  "MC_OPT",
101
  "MIGRATION_MODE_OPT",
102
  "NET_OPT",
103
  "NEW_CLUSTER_CERT_OPT",
104
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
105
  "NEW_CONFD_HMAC_KEY_OPT",
106
  "NEW_RAPI_CERT_OPT",
107
  "NEW_SECONDARY_OPT",
108
  "NIC_PARAMS_OPT",
109
  "NODE_FORCE_JOIN_OPT",
110
  "NODE_LIST_OPT",
111
  "NODE_PLACEMENT_OPT",
112
  "NODEGROUP_OPT",
113
  "NODE_PARAMS_OPT",
114
  "NODE_POWERED_OPT",
115
  "NODRBD_STORAGE_OPT",
116
  "NOHDR_OPT",
117
  "NOIPCHECK_OPT",
118
  "NO_INSTALL_OPT",
119
  "NONAMECHECK_OPT",
120
  "NOLVM_STORAGE_OPT",
121
  "NOMODIFY_ETCHOSTS_OPT",
122
  "NOMODIFY_SSH_SETUP_OPT",
123
  "NONICS_OPT",
124
  "NONLIVE_OPT",
125
  "NONPLUS1_OPT",
126
  "NOSHUTDOWN_OPT",
127
  "NOSTART_OPT",
128
  "NOSSH_KEYCHECK_OPT",
129
  "NOVOTING_OPT",
130
  "NWSYNC_OPT",
131
  "ON_PRIMARY_OPT",
132
  "ON_SECONDARY_OPT",
133
  "OFFLINE_OPT",
134
  "OSPARAMS_OPT",
135
  "OS_OPT",
136
  "OS_SIZE_OPT",
137
  "OOB_TIMEOUT_OPT",
138
  "PREALLOC_WIPE_DISKS_OPT",
139
  "PRIMARY_IP_VERSION_OPT",
140
  "PRIORITY_OPT",
141
  "RAPI_CERT_OPT",
142
  "READD_OPT",
143
  "REBOOT_TYPE_OPT",
144
  "REMOVE_INSTANCE_OPT",
145
  "REMOVE_UIDS_OPT",
146
  "RESERVED_LVS_OPT",
147
  "ROMAN_OPT",
148
  "SECONDARY_IP_OPT",
149
  "SELECT_OS_OPT",
150
  "SEP_OPT",
151
  "SHOWCMD_OPT",
152
  "SHUTDOWN_TIMEOUT_OPT",
153
  "SINGLE_NODE_OPT",
154
  "SRC_DIR_OPT",
155
  "SRC_NODE_OPT",
156
  "SUBMIT_OPT",
157
  "STATIC_OPT",
158
  "SYNC_OPT",
159
  "TAG_SRC_OPT",
160
  "TIMEOUT_OPT",
161
  "UIDPOOL_OPT",
162
  "USEUNITS_OPT",
163
  "USE_REPL_NET_OPT",
164
  "VERBOSE_OPT",
165
  "VG_NAME_OPT",
166
  "YES_DOIT_OPT",
167
  # Generic functions for CLI programs
168
  "ConfirmOperation",
169
  "GenericMain",
170
  "GenericInstanceCreate",
171
  "GenericList",
172
  "GenericListFields",
173
  "GetClient",
174
  "GetOnlineNodes",
175
  "JobExecutor",
176
  "JobSubmittedException",
177
  "ParseTimespec",
178
  "RunWhileClusterStopped",
179
  "SubmitOpCode",
180
  "SubmitOrSend",
181
  "UsesRPC",
182
  # Formatting functions
183
  "ToStderr", "ToStdout",
184
  "FormatError",
185
  "FormatQueryResult",
186
  "FormatParameterDict",
187
  "GenerateTable",
188
  "AskUser",
189
  "FormatTimestamp",
190
  "FormatLogMessage",
191
  # Tags functions
192
  "ListTags",
193
  "AddTags",
194
  "RemoveTags",
195
  # command line options support infrastructure
196
  "ARGS_MANY_INSTANCES",
197
  "ARGS_MANY_NODES",
198
  "ARGS_MANY_GROUPS",
199
  "ARGS_NONE",
200
  "ARGS_ONE_INSTANCE",
201
  "ARGS_ONE_NODE",
202
  "ARGS_ONE_GROUP",
203
  "ARGS_ONE_OS",
204
  "ArgChoice",
205
  "ArgCommand",
206
  "ArgFile",
207
  "ArgGroup",
208
  "ArgHost",
209
  "ArgInstance",
210
  "ArgJobId",
211
  "ArgNode",
212
  "ArgOs",
213
  "ArgSuggest",
214
  "ArgUnknown",
215
  "OPT_COMPL_INST_ADD_NODES",
216
  "OPT_COMPL_MANY_NODES",
217
  "OPT_COMPL_ONE_IALLOCATOR",
218
  "OPT_COMPL_ONE_INSTANCE",
219
  "OPT_COMPL_ONE_NODE",
220
  "OPT_COMPL_ONE_NODEGROUP",
221
  "OPT_COMPL_ONE_OS",
222
  "cli_option",
223
  "SplitNodeOption",
224
  "CalculateOSNames",
225
  "ParseFields",
226
  "COMMON_CREATE_OPTS",
227
  ]
228

    
229
NO_PREFIX = "no_"
230
UN_PREFIX = "-"
231

    
232
#: Priorities (sorted)
233
_PRIORITY_NAMES = [
234
  ("low", constants.OP_PRIO_LOW),
235
  ("normal", constants.OP_PRIO_NORMAL),
236
  ("high", constants.OP_PRIO_HIGH),
237
  ]
238

    
239
#: Priority dictionary for easier lookup
240
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
241
# we migrate to Python 2.6
242
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
243

    
244
# Query result status for clients
245
(QR_NORMAL,
246
 QR_UNKNOWN,
247
 QR_INCOMPLETE) = range(3)
248

    
249

    
250
class _Argument:
251
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
252
    self.min = min
253
    self.max = max
254

    
255
  def __repr__(self):
256
    return ("<%s min=%s max=%s>" %
257
            (self.__class__.__name__, self.min, self.max))
258

    
259

    
260
class ArgSuggest(_Argument):
261
  """Suggesting argument.
262

263
  Value can be any of the ones passed to the constructor.
264

265
  """
266
  # pylint: disable-msg=W0622
267
  def __init__(self, min=0, max=None, choices=None):
268
    _Argument.__init__(self, min=min, max=max)
269
    self.choices = choices
270

    
271
  def __repr__(self):
272
    return ("<%s min=%s max=%s choices=%r>" %
273
            (self.__class__.__name__, self.min, self.max, self.choices))
274

    
275

    
276
class ArgChoice(ArgSuggest):
277
  """Choice argument.
278

279
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
280
  but value must be one of the choices.
281

282
  """
283

    
284

    
285
class ArgUnknown(_Argument):
286
  """Unknown argument to program (e.g. determined at runtime).
287

288
  """
289

    
290

    
291
class ArgInstance(_Argument):
292
  """Instances argument.
293

294
  """
295

    
296

    
297
class ArgNode(_Argument):
298
  """Node argument.
299

300
  """
301

    
302

    
303
class ArgGroup(_Argument):
304
  """Node group argument.
305

306
  """
307

    
308

    
309
class ArgJobId(_Argument):
310
  """Job ID argument.
311

312
  """
313

    
314

    
315
class ArgFile(_Argument):
316
  """File path argument.
317

318
  """
319

    
320

    
321
class ArgCommand(_Argument):
322
  """Command argument.
323

324
  """
325

    
326

    
327
class ArgHost(_Argument):
328
  """Host argument.
329

330
  """
331

    
332

    
333
class ArgOs(_Argument):
334
  """OS argument.
335

336
  """
337

    
338

    
339
ARGS_NONE = []
340
ARGS_MANY_INSTANCES = [ArgInstance()]
341
ARGS_MANY_NODES = [ArgNode()]
342
ARGS_MANY_GROUPS = [ArgGroup()]
343
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
344
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
345
ARGS_ONE_GROUP = [ArgInstance(min=1, max=1)]
346
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
347

    
348

    
349
def _ExtractTagsObject(opts, args):
350
  """Extract the tag type object.
351

352
  Note that this function will modify its args parameter.
353

354
  """
355
  if not hasattr(opts, "tag_type"):
356
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
357
  kind = opts.tag_type
358
  if kind == constants.TAG_CLUSTER:
359
    retval = kind, kind
360
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
361
    if not args:
362
      raise errors.OpPrereqError("no arguments passed to the command")
363
    name = args.pop(0)
364
    retval = kind, name
365
  else:
366
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
367
  return retval
368

    
369

    
370
def _ExtendTags(opts, args):
371
  """Extend the args if a source file has been given.
372

373
  This function will extend the tags with the contents of the file
374
  passed in the 'tags_source' attribute of the opts parameter. A file
375
  named '-' will be replaced by stdin.
376

377
  """
378
  fname = opts.tags_source
379
  if fname is None:
380
    return
381
  if fname == "-":
382
    new_fh = sys.stdin
383
  else:
384
    new_fh = open(fname, "r")
385
  new_data = []
386
  try:
387
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
388
    # because of python bug 1633941
389
    while True:
390
      line = new_fh.readline()
391
      if not line:
392
        break
393
      new_data.append(line.strip())
394
  finally:
395
    new_fh.close()
396
  args.extend(new_data)
397

    
398

    
399
def ListTags(opts, args):
400
  """List the tags on a given object.
401

402
  This is a generic implementation that knows how to deal with all
403
  three cases of tag objects (cluster, node, instance). The opts
404
  argument is expected to contain a tag_type field denoting what
405
  object type we work on.
406

407
  """
408
  kind, name = _ExtractTagsObject(opts, args)
409
  cl = GetClient()
410
  result = cl.QueryTags(kind, name)
411
  result = list(result)
412
  result.sort()
413
  for tag in result:
414
    ToStdout(tag)
415

    
416

    
417
def AddTags(opts, args):
418
  """Add tags on a given object.
419

420
  This is a generic implementation that knows how to deal with all
421
  three cases of tag objects (cluster, node, instance). The opts
422
  argument is expected to contain a tag_type field denoting what
423
  object type we work on.
424

425
  """
426
  kind, name = _ExtractTagsObject(opts, args)
427
  _ExtendTags(opts, args)
428
  if not args:
429
    raise errors.OpPrereqError("No tags to be added")
430
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
431
  SubmitOpCode(op, opts=opts)
432

    
433

    
434
def RemoveTags(opts, args):
435
  """Remove tags from a given object.
436

437
  This is a generic implementation that knows how to deal with all
438
  three cases of tag objects (cluster, node, instance). The opts
439
  argument is expected to contain a tag_type field denoting what
440
  object type we work on.
441

442
  """
443
  kind, name = _ExtractTagsObject(opts, args)
444
  _ExtendTags(opts, args)
445
  if not args:
446
    raise errors.OpPrereqError("No tags to be removed")
447
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
448
  SubmitOpCode(op, opts=opts)
449

    
450

    
451
def check_unit(option, opt, value): # pylint: disable-msg=W0613
452
  """OptParsers custom converter for units.
453

454
  """
455
  try:
456
    return utils.ParseUnit(value)
457
  except errors.UnitParseError, err:
458
    raise OptionValueError("option %s: %s" % (opt, err))
459

    
460

    
461
def _SplitKeyVal(opt, data):
462
  """Convert a KeyVal string into a dict.
463

464
  This function will convert a key=val[,...] string into a dict. Empty
465
  values will be converted specially: keys which have the prefix 'no_'
466
  will have the value=False and the prefix stripped, the others will
467
  have value=True.
468

469
  @type opt: string
470
  @param opt: a string holding the option name for which we process the
471
      data, used in building error messages
472
  @type data: string
473
  @param data: a string of the format key=val,key=val,...
474
  @rtype: dict
475
  @return: {key=val, key=val}
476
  @raises errors.ParameterError: if there are duplicate keys
477

478
  """
479
  kv_dict = {}
480
  if data:
481
    for elem in utils.UnescapeAndSplit(data, sep=","):
482
      if "=" in elem:
483
        key, val = elem.split("=", 1)
484
      else:
485
        if elem.startswith(NO_PREFIX):
486
          key, val = elem[len(NO_PREFIX):], False
487
        elif elem.startswith(UN_PREFIX):
488
          key, val = elem[len(UN_PREFIX):], None
489
        else:
490
          key, val = elem, True
491
      if key in kv_dict:
492
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
493
                                    (key, opt))
494
      kv_dict[key] = val
495
  return kv_dict
496

    
497

    
498
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
499
  """Custom parser for ident:key=val,key=val options.
500

501
  This will store the parsed values as a tuple (ident, {key: val}). As such,
502
  multiple uses of this option via action=append is possible.
503

504
  """
505
  if ":" not in value:
506
    ident, rest = value, ''
507
  else:
508
    ident, rest = value.split(":", 1)
509

    
510
  if ident.startswith(NO_PREFIX):
511
    if rest:
512
      msg = "Cannot pass options when removing parameter groups: %s" % value
513
      raise errors.ParameterError(msg)
514
    retval = (ident[len(NO_PREFIX):], False)
515
  elif ident.startswith(UN_PREFIX):
516
    if rest:
517
      msg = "Cannot pass options when removing parameter groups: %s" % value
518
      raise errors.ParameterError(msg)
519
    retval = (ident[len(UN_PREFIX):], None)
520
  else:
521
    kv_dict = _SplitKeyVal(opt, rest)
522
    retval = (ident, kv_dict)
523
  return retval
524

    
525

    
526
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
527
  """Custom parser class for key=val,key=val options.
528

529
  This will store the parsed values as a dict {key: val}.
530

531
  """
532
  return _SplitKeyVal(opt, value)
533

    
534

    
535
def check_bool(option, opt, value): # pylint: disable-msg=W0613
536
  """Custom parser for yes/no options.
537

538
  This will store the parsed value as either True or False.
539

540
  """
541
  value = value.lower()
542
  if value == constants.VALUE_FALSE or value == "no":
543
    return False
544
  elif value == constants.VALUE_TRUE or value == "yes":
545
    return True
546
  else:
547
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
548

    
549

    
550
# completion_suggestion is normally a list. Using numeric values not evaluating
551
# to False for dynamic completion.
552
(OPT_COMPL_MANY_NODES,
553
 OPT_COMPL_ONE_NODE,
554
 OPT_COMPL_ONE_INSTANCE,
555
 OPT_COMPL_ONE_OS,
556
 OPT_COMPL_ONE_IALLOCATOR,
557
 OPT_COMPL_INST_ADD_NODES,
558
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
559

    
560
OPT_COMPL_ALL = frozenset([
561
  OPT_COMPL_MANY_NODES,
562
  OPT_COMPL_ONE_NODE,
563
  OPT_COMPL_ONE_INSTANCE,
564
  OPT_COMPL_ONE_OS,
565
  OPT_COMPL_ONE_IALLOCATOR,
566
  OPT_COMPL_INST_ADD_NODES,
567
  OPT_COMPL_ONE_NODEGROUP,
568
  ])
569

    
570

    
571
class CliOption(Option):
572
  """Custom option class for optparse.
573

574
  """
575
  ATTRS = Option.ATTRS + [
576
    "completion_suggest",
577
    ]
578
  TYPES = Option.TYPES + (
579
    "identkeyval",
580
    "keyval",
581
    "unit",
582
    "bool",
583
    )
584
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
585
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
586
  TYPE_CHECKER["keyval"] = check_key_val
587
  TYPE_CHECKER["unit"] = check_unit
588
  TYPE_CHECKER["bool"] = check_bool
589

    
590

    
591
# optparse.py sets make_option, so we do it for our own option class, too
592
cli_option = CliOption
593

    
594

    
595
_YORNO = "yes|no"
596

    
597
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
598
                       help="Increase debugging level")
599

    
600
NOHDR_OPT = cli_option("--no-headers", default=False,
601
                       action="store_true", dest="no_headers",
602
                       help="Don't display column headers")
603

    
604
SEP_OPT = cli_option("--separator", default=None,
605
                     action="store", dest="separator",
606
                     help=("Separator between output fields"
607
                           " (defaults to one space)"))
608

    
609
USEUNITS_OPT = cli_option("--units", default=None,
610
                          dest="units", choices=('h', 'm', 'g', 't'),
611
                          help="Specify units for output (one of h/m/g/t)")
612

    
613
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
614
                        type="string", metavar="FIELDS",
615
                        help="Comma separated list of output fields")
616

    
617
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
618
                       default=False, help="Force the operation")
619

    
620
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
621
                         default=False, help="Do not require confirmation")
622

    
623
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
624
                                  action="store_true", default=False,
625
                                  help=("Ignore offline nodes and do as much"
626
                                        " as possible"))
627

    
628
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
629
                         default=None, help="File with tag names")
630

    
631
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
632
                        default=False, action="store_true",
633
                        help=("Submit the job and return the job ID, but"
634
                              " don't wait for the job to finish"))
635

    
636
SYNC_OPT = cli_option("--sync", dest="do_locking",
637
                      default=False, action="store_true",
638
                      help=("Grab locks while doing the queries"
639
                            " in order to ensure more consistent results"))
640

    
641
DRY_RUN_OPT = cli_option("--dry-run", default=False,
642
                         action="store_true",
643
                         help=("Do not execute the operation, just run the"
644
                               " check steps and verify it it could be"
645
                               " executed"))
646

    
647
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
648
                         action="store_true",
649
                         help="Increase the verbosity of the operation")
650

    
651
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
652
                              action="store_true", dest="simulate_errors",
653
                              help="Debugging option that makes the operation"
654
                              " treat most runtime checks as failed")
655

    
656
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
657
                        default=True, action="store_false",
658
                        help="Don't wait for sync (DANGEROUS!)")
659

    
660
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
661
                               help="Custom disk setup (diskless, file,"
662
                               " plain or drbd)",
663
                               default=None, metavar="TEMPL",
664
                               choices=list(constants.DISK_TEMPLATES))
665

    
666
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
667
                        help="Do not create any network cards for"
668
                        " the instance")
669

    
670
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
671
                               help="Relative path under default cluster-wide"
672
                               " file storage dir to store file-based disks",
673
                               default=None, metavar="<DIR>")
674

    
675
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
676
                                  help="Driver to use for image files",
677
                                  default="loop", metavar="<DRIVER>",
678
                                  choices=list(constants.FILE_DRIVER))
679

    
680
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
681
                            help="Select nodes for the instance automatically"
682
                            " using the <NAME> iallocator plugin",
683
                            default=None, type="string",
684
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
685

    
686
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
687
                            metavar="<NAME>",
688
                            help="Set the default instance allocator plugin",
689
                            default=None, type="string",
690
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
691

    
692
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
693
                    metavar="<os>",
694
                    completion_suggest=OPT_COMPL_ONE_OS)
695

    
696
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
697
                         type="keyval", default={},
698
                         help="OS parameters")
699

    
700
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
701
                               action="store_true", default=False,
702
                               help="Force an unknown variant")
703

    
704
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
705
                            action="store_true", default=False,
706
                            help="Do not install the OS (will"
707
                            " enable no-start)")
708

    
709
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
710
                         type="keyval", default={},
711
                         help="Backend parameters")
712

    
713
HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
714
                         default={}, dest="hvparams",
715
                         help="Hypervisor parameters")
716

    
717
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
718
                            help="Hypervisor and hypervisor options, in the"
719
                            " format hypervisor:option=value,option=value,...",
720
                            default=None, type="identkeyval")
721

    
722
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
723
                        help="Hypervisor and hypervisor options, in the"
724
                        " format hypervisor:option=value,option=value,...",
725
                        default=[], action="append", type="identkeyval")
726

    
727
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
728
                           action="store_false",
729
                           help="Don't check that the instance's IP"
730
                           " is alive")
731

    
732
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
733
                             default=True, action="store_false",
734
                             help="Don't check that the instance's name"
735
                             " is resolvable")
736

    
737
NET_OPT = cli_option("--net",
738
                     help="NIC parameters", default=[],
739
                     dest="nics", action="append", type="identkeyval")
740

    
741
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
742
                      dest="disks", action="append", type="identkeyval")
743

    
744
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
745
                         help="Comma-separated list of disks"
746
                         " indices to act on (e.g. 0,2) (optional,"
747
                         " defaults to all disks)")
748

    
749
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
750
                         help="Enforces a single-disk configuration using the"
751
                         " given disk size, in MiB unless a suffix is used",
752
                         default=None, type="unit", metavar="<size>")
753

    
754
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
755
                                dest="ignore_consistency",
756
                                action="store_true", default=False,
757
                                help="Ignore the consistency of the disks on"
758
                                " the secondary")
759

    
760
NONLIVE_OPT = cli_option("--non-live", dest="live",
761
                         default=True, action="store_false",
762
                         help="Do a non-live migration (this usually means"
763
                         " freeze the instance, save the state, transfer and"
764
                         " only then resume running on the secondary node)")
765

    
766
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
767
                                default=None,
768
                                choices=list(constants.HT_MIGRATION_MODES),
769
                                help="Override default migration mode (choose"
770
                                " either live or non-live")
771

    
772
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
773
                                help="Target node and optional secondary node",
774
                                metavar="<pnode>[:<snode>]",
775
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
776

    
777
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
778
                           action="append", metavar="<node>",
779
                           help="Use only this node (can be used multiple"
780
                           " times, if not given defaults to all nodes)",
781
                           completion_suggest=OPT_COMPL_ONE_NODE)
782

    
783
NODEGROUP_OPT = cli_option("-g", "--node-group",
784
                           dest="nodegroup",
785
                           help="Node group (name or uuid)",
786
                           metavar="<nodegroup>",
787
                           default=None, type="string",
788
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
789

    
790
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
791
                             metavar="<node>",
792
                             completion_suggest=OPT_COMPL_ONE_NODE)
793

    
794
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
795
                         action="store_false",
796
                         help="Don't start the instance after creation")
797

    
798
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
799
                         action="store_true", default=False,
800
                         help="Show command instead of executing it")
801

    
802
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
803
                         default=False, action="store_true",
804
                         help="Instead of performing the migration, try to"
805
                         " recover from a failed cleanup. This is safe"
806
                         " to run even if the instance is healthy, but it"
807
                         " will create extra replication traffic and "
808
                         " disrupt briefly the replication (like during the"
809
                         " migration")
810

    
811
STATIC_OPT = cli_option("-s", "--static", dest="static",
812
                        action="store_true", default=False,
813
                        help="Only show configuration data, not runtime data")
814

    
815
ALL_OPT = cli_option("--all", dest="show_all",
816
                     default=False, action="store_true",
817
                     help="Show info on all instances on the cluster."
818
                     " This can take a long time to run, use wisely")
819

    
820
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
821
                           action="store_true", default=False,
822
                           help="Interactive OS reinstall, lists available"
823
                           " OS templates for selection")
824

    
825
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
826
                                 action="store_true", default=False,
827
                                 help="Remove the instance from the cluster"
828
                                 " configuration even if there are failures"
829
                                 " during the removal process")
830

    
831
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
832
                                        dest="ignore_remove_failures",
833
                                        action="store_true", default=False,
834
                                        help="Remove the instance from the"
835
                                        " cluster configuration even if there"
836
                                        " are failures during the removal"
837
                                        " process")
838

    
839
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
840
                                 action="store_true", default=False,
841
                                 help="Remove the instance from the cluster")
842

    
843
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
844
                               help="Specifies the new node for the instance",
845
                               metavar="NODE", default=None,
846
                               completion_suggest=OPT_COMPL_ONE_NODE)
847

    
848
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
849
                               help="Specifies the new secondary node",
850
                               metavar="NODE", default=None,
851
                               completion_suggest=OPT_COMPL_ONE_NODE)
852

    
853
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
854
                            default=False, action="store_true",
855
                            help="Replace the disk(s) on the primary"
856
                            " node (only for the drbd template)")
857

    
858
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
859
                              default=False, action="store_true",
860
                              help="Replace the disk(s) on the secondary"
861
                              " node (only for the drbd template)")
862

    
863
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
864
                              default=False, action="store_true",
865
                              help="Lock all nodes and auto-promote as needed"
866
                              " to MC status")
867

    
868
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
869
                              default=False, action="store_true",
870
                              help="Automatically replace faulty disks"
871
                              " (only for the drbd template)")
872

    
873
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
874
                             default=False, action="store_true",
875
                             help="Ignore current recorded size"
876
                             " (useful for forcing activation when"
877
                             " the recorded size is wrong)")
878

    
879
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
880
                          metavar="<node>",
881
                          completion_suggest=OPT_COMPL_ONE_NODE)
882

    
883
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
884
                         metavar="<dir>")
885

    
886
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
887
                              help="Specify the secondary ip for the node",
888
                              metavar="ADDRESS", default=None)
889

    
890
READD_OPT = cli_option("--readd", dest="readd",
891
                       default=False, action="store_true",
892
                       help="Readd old node after replacing it")
893

    
894
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
895
                                default=True, action="store_false",
896
                                help="Disable SSH key fingerprint checking")
897

    
898
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
899
                                 default=False, action="store_true",
900
                                 help="Force the joining of a node,"
901
                                      " needed when merging clusters")
902

    
903
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
904
                    type="bool", default=None, metavar=_YORNO,
905
                    help="Set the master_candidate flag on the node")
906

    
907
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
908
                         type="bool", default=None,
909
                         help=("Set the offline flag on the node"
910
                               " (cluster does not communicate with offline"
911
                               " nodes)"))
912

    
913
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
914
                         type="bool", default=None,
915
                         help=("Set the drained flag on the node"
916
                               " (excluded from allocation operations)"))
917

    
918
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
919
                    type="bool", default=None, metavar=_YORNO,
920
                    help="Set the master_capable flag on the node")
921

    
922
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
923
                    type="bool", default=None, metavar=_YORNO,
924
                    help="Set the vm_capable flag on the node")
925

    
926
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
927
                             type="bool", default=None, metavar=_YORNO,
928
                             help="Set the allocatable flag on a volume")
929

    
930
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
931
                               help="Disable support for lvm based instances"
932
                               " (cluster-wide)",
933
                               action="store_false", default=True)
934

    
935
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
936
                            dest="enabled_hypervisors",
937
                            help="Comma-separated list of hypervisors",
938
                            type="string", default=None)
939

    
940
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
941
                            type="keyval", default={},
942
                            help="NIC parameters")
943

    
944
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
945
                         dest="candidate_pool_size", type="int",
946
                         help="Set the candidate pool size")
947

    
948
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
949
                         help=("Enables LVM and specifies the volume group"
950
                               " name (cluster-wide) for disk allocation"
951
                               " [%s]" % constants.DEFAULT_VG),
952
                         metavar="VG", default=None)
953

    
954
YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
955
                          help="Destroy cluster", action="store_true")
956

    
957
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
958
                          help="Skip node agreement check (dangerous)",
959
                          action="store_true", default=False)
960

    
961
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
962
                            help="Specify the mac prefix for the instance IP"
963
                            " addresses, in the format XX:XX:XX",
964
                            metavar="PREFIX",
965
                            default=None)
966

    
967
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
968
                               help="Specify the node interface (cluster-wide)"
969
                               " on which the master IP address will be added"
970
                               " (cluster init default: %s)" %
971
                               constants.DEFAULT_BRIDGE,
972
                               metavar="NETDEV",
973
                               default=None)
974

    
975
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
976
                                help="Specify the default directory (cluster-"
977
                                "wide) for storing the file-based disks [%s]" %
978
                                constants.DEFAULT_FILE_STORAGE_DIR,
979
                                metavar="DIR",
980
                                default=constants.DEFAULT_FILE_STORAGE_DIR)
981

    
982
GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
983
                            dest="shared_file_storage_dir",
984
                            help="Specify the default directory (cluster-"
985
                            "wide) for storing the shared file-based"
986
                            " disks [%s]" %
987
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
988
                            metavar="SHAREDDIR",
989
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
990

    
991
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
992
                                   help="Don't modify /etc/hosts",
993
                                   action="store_false", default=True)
994

    
995
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
996
                                    help="Don't initialize SSH keys",
997
                                    action="store_false", default=True)
998

    
999
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1000
                             help="Enable parseable error messages",
1001
                             action="store_true", default=False)
1002

    
1003
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1004
                          help="Skip N+1 memory redundancy tests",
1005
                          action="store_true", default=False)
1006

    
1007
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1008
                             help="Type of reboot: soft/hard/full",
1009
                             default=constants.INSTANCE_REBOOT_HARD,
1010
                             metavar="<REBOOT>",
1011
                             choices=list(constants.REBOOT_TYPES))
1012

    
1013
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1014
                                    dest="ignore_secondaries",
1015
                                    default=False, action="store_true",
1016
                                    help="Ignore errors from secondaries")
1017

    
1018
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1019
                            action="store_false", default=True,
1020
                            help="Don't shutdown the instance (unsafe)")
1021

    
1022
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1023
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1024
                         help="Maximum time to wait")
1025

    
1026
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1027
                         dest="shutdown_timeout", type="int",
1028
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1029
                         help="Maximum time to wait for instance shutdown")
1030

    
1031
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1032
                          default=None,
1033
                          help=("Number of seconds between repetions of the"
1034
                                " command"))
1035

    
1036
EARLY_RELEASE_OPT = cli_option("--early-release",
1037
                               dest="early_release", default=False,
1038
                               action="store_true",
1039
                               help="Release the locks on the secondary"
1040
                               " node(s) early")
1041

    
1042
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1043
                                  dest="new_cluster_cert",
1044
                                  default=False, action="store_true",
1045
                                  help="Generate a new cluster certificate")
1046

    
1047
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1048
                           default=None,
1049
                           help="File containing new RAPI certificate")
1050

    
1051
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1052
                               default=None, action="store_true",
1053
                               help=("Generate a new self-signed RAPI"
1054
                                     " certificate"))
1055

    
1056
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1057
                                    dest="new_confd_hmac_key",
1058
                                    default=False, action="store_true",
1059
                                    help=("Create a new HMAC key for %s" %
1060
                                          constants.CONFD))
1061

    
1062
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1063
                                       dest="cluster_domain_secret",
1064
                                       default=None,
1065
                                       help=("Load new new cluster domain"
1066
                                             " secret from file"))
1067

    
1068
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1069
                                           dest="new_cluster_domain_secret",
1070
                                           default=False, action="store_true",
1071
                                           help=("Create a new cluster domain"
1072
                                                 " secret"))
1073

    
1074
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1075
                              dest="use_replication_network",
1076
                              help="Whether to use the replication network"
1077
                              " for talking to the nodes",
1078
                              action="store_true", default=False)
1079

    
1080
MAINTAIN_NODE_HEALTH_OPT = \
1081
    cli_option("--maintain-node-health", dest="maintain_node_health",
1082
               metavar=_YORNO, default=None, type="bool",
1083
               help="Configure the cluster to automatically maintain node"
1084
               " health, by shutting down unknown instances, shutting down"
1085
               " unknown DRBD devices, etc.")
1086

    
1087
IDENTIFY_DEFAULTS_OPT = \
1088
    cli_option("--identify-defaults", dest="identify_defaults",
1089
               default=False, action="store_true",
1090
               help="Identify which saved instance parameters are equal to"
1091
               " the current cluster defaults and set them as such, instead"
1092
               " of marking them as overridden")
1093

    
1094
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1095
                         action="store", dest="uid_pool",
1096
                         help=("A list of user-ids or user-id"
1097
                               " ranges separated by commas"))
1098

    
1099
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1100
                          action="store", dest="add_uids",
1101
                          help=("A list of user-ids or user-id"
1102
                                " ranges separated by commas, to be"
1103
                                " added to the user-id pool"))
1104

    
1105
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1106
                             action="store", dest="remove_uids",
1107
                             help=("A list of user-ids or user-id"
1108
                                   " ranges separated by commas, to be"
1109
                                   " removed from the user-id pool"))
1110

    
1111
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1112
                             action="store", dest="reserved_lvs",
1113
                             help=("A comma-separated list of reserved"
1114
                                   " logical volumes names, that will be"
1115
                                   " ignored by cluster verify"))
1116

    
1117
ROMAN_OPT = cli_option("--roman",
1118
                       dest="roman_integers", default=False,
1119
                       action="store_true",
1120
                       help="Use roman numbers for positive integers")
1121

    
1122
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1123
                             action="store", default=None,
1124
                             help="Specifies usermode helper for DRBD")
1125

    
1126
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1127
                                action="store_false", default=True,
1128
                                help="Disable support for DRBD")
1129

    
1130
PRIMARY_IP_VERSION_OPT = \
1131
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1132
               action="store", dest="primary_ip_version",
1133
               metavar="%d|%d" % (constants.IP4_VERSION,
1134
                                  constants.IP6_VERSION),
1135
               help="Cluster-wide IP version for primary IP")
1136

    
1137
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1138
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1139
                          choices=_PRIONAME_TO_VALUE.keys(),
1140
                          help="Priority for opcode processing")
1141

    
1142
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1143
                        type="bool", default=None, metavar=_YORNO,
1144
                        help="Sets the hidden flag on the OS")
1145

    
1146
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1147
                        type="bool", default=None, metavar=_YORNO,
1148
                        help="Sets the blacklisted flag on the OS")
1149

    
1150
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1151
                                     type="bool", metavar=_YORNO,
1152
                                     dest="prealloc_wipe_disks",
1153
                                     help=("Wipe disks prior to instance"
1154
                                           " creation"))
1155

    
1156
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1157
                             type="keyval", default=None,
1158
                             help="Node parameters")
1159

    
1160
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1161
                              action="store", metavar="POLICY", default=None,
1162
                              help="Allocation policy for the node group")
1163

    
1164
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1165
                              type="bool", metavar=_YORNO,
1166
                              dest="node_powered",
1167
                              help="Specify if the SoR for node is powered")
1168

    
1169
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1170
                         default=constants.OOB_TIMEOUT,
1171
                         help="Maximum time to wait for out-of-band helper")
1172

    
1173

    
1174
#: Options provided by all commands
1175
COMMON_OPTS = [DEBUG_OPT]
1176

    
1177
# common options for creating instances. add and import then add their own
1178
# specific ones.
1179
COMMON_CREATE_OPTS = [
1180
  BACKEND_OPT,
1181
  DISK_OPT,
1182
  DISK_TEMPLATE_OPT,
1183
  FILESTORE_DIR_OPT,
1184
  FILESTORE_DRIVER_OPT,
1185
  HYPERVISOR_OPT,
1186
  IALLOCATOR_OPT,
1187
  NET_OPT,
1188
  NODE_PLACEMENT_OPT,
1189
  NOIPCHECK_OPT,
1190
  NONAMECHECK_OPT,
1191
  NONICS_OPT,
1192
  NWSYNC_OPT,
1193
  OSPARAMS_OPT,
1194
  OS_SIZE_OPT,
1195
  SUBMIT_OPT,
1196
  DRY_RUN_OPT,
1197
  PRIORITY_OPT,
1198
  ]
1199

    
1200

    
1201
def _ParseArgs(argv, commands, aliases):
1202
  """Parser for the command line arguments.
1203

1204
  This function parses the arguments and returns the function which
1205
  must be executed together with its (modified) arguments.
1206

1207
  @param argv: the command line
1208
  @param commands: dictionary with special contents, see the design
1209
      doc for cmdline handling
1210
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
1211

1212
  """
1213
  if len(argv) == 0:
1214
    binary = "<command>"
1215
  else:
1216
    binary = argv[0].split("/")[-1]
1217

    
1218
  if len(argv) > 1 and argv[1] == "--version":
1219
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1220
             constants.RELEASE_VERSION)
1221
    # Quit right away. That way we don't have to care about this special
1222
    # argument. optparse.py does it the same.
1223
    sys.exit(0)
1224

    
1225
  if len(argv) < 2 or not (argv[1] in commands or
1226
                           argv[1] in aliases):
1227
    # let's do a nice thing
1228
    sortedcmds = commands.keys()
1229
    sortedcmds.sort()
1230

    
1231
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1232
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1233
    ToStdout("")
1234

    
1235
    # compute the max line length for cmd + usage
1236
    mlen = max([len(" %s" % cmd) for cmd in commands])
1237
    mlen = min(60, mlen) # should not get here...
1238

    
1239
    # and format a nice command list
1240
    ToStdout("Commands:")
1241
    for cmd in sortedcmds:
1242
      cmdstr = " %s" % (cmd,)
1243
      help_text = commands[cmd][4]
1244
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1245
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1246
      for line in help_lines:
1247
        ToStdout("%-*s   %s", mlen, "", line)
1248

    
1249
    ToStdout("")
1250

    
1251
    return None, None, None
1252

    
1253
  # get command, unalias it, and look it up in commands
1254
  cmd = argv.pop(1)
1255
  if cmd in aliases:
1256
    if cmd in commands:
1257
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
1258
                                   " command" % cmd)
1259

    
1260
    if aliases[cmd] not in commands:
1261
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1262
                                   " command '%s'" % (cmd, aliases[cmd]))
1263

    
1264
    cmd = aliases[cmd]
1265

    
1266
  func, args_def, parser_opts, usage, description = commands[cmd]
1267
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1268
                        description=description,
1269
                        formatter=TitledHelpFormatter(),
1270
                        usage="%%prog %s %s" % (cmd, usage))
1271
  parser.disable_interspersed_args()
1272
  options, args = parser.parse_args()
1273

    
1274
  if not _CheckArguments(cmd, args_def, args):
1275
    return None, None, None
1276

    
1277
  return func, options, args
1278

    
1279

    
1280
def _CheckArguments(cmd, args_def, args):
1281
  """Verifies the arguments using the argument definition.
1282

1283
  Algorithm:
1284

1285
    1. Abort with error if values specified by user but none expected.
1286

1287
    1. For each argument in definition
1288

1289
      1. Keep running count of minimum number of values (min_count)
1290
      1. Keep running count of maximum number of values (max_count)
1291
      1. If it has an unlimited number of values
1292

1293
        1. Abort with error if it's not the last argument in the definition
1294

1295
    1. If last argument has limited number of values
1296

1297
      1. Abort with error if number of values doesn't match or is too large
1298

1299
    1. Abort with error if user didn't pass enough values (min_count)
1300

1301
  """
1302
  if args and not args_def:
1303
    ToStderr("Error: Command %s expects no arguments", cmd)
1304
    return False
1305

    
1306
  min_count = None
1307
  max_count = None
1308
  check_max = None
1309

    
1310
  last_idx = len(args_def) - 1
1311

    
1312
  for idx, arg in enumerate(args_def):
1313
    if min_count is None:
1314
      min_count = arg.min
1315
    elif arg.min is not None:
1316
      min_count += arg.min
1317

    
1318
    if max_count is None:
1319
      max_count = arg.max
1320
    elif arg.max is not None:
1321
      max_count += arg.max
1322

    
1323
    if idx == last_idx:
1324
      check_max = (arg.max is not None)
1325

    
1326
    elif arg.max is None:
1327
      raise errors.ProgrammerError("Only the last argument can have max=None")
1328

    
1329
  if check_max:
1330
    # Command with exact number of arguments
1331
    if (min_count is not None and max_count is not None and
1332
        min_count == max_count and len(args) != min_count):
1333
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1334
      return False
1335

    
1336
    # Command with limited number of arguments
1337
    if max_count is not None and len(args) > max_count:
1338
      ToStderr("Error: Command %s expects only %d argument(s)",
1339
               cmd, max_count)
1340
      return False
1341

    
1342
  # Command with some required arguments
1343
  if min_count is not None and len(args) < min_count:
1344
    ToStderr("Error: Command %s expects at least %d argument(s)",
1345
             cmd, min_count)
1346
    return False
1347

    
1348
  return True
1349

    
1350

    
1351
def SplitNodeOption(value):
1352
  """Splits the value of a --node option.
1353

1354
  """
1355
  if value and ':' in value:
1356
    return value.split(':', 1)
1357
  else:
1358
    return (value, None)
1359

    
1360

    
1361
def CalculateOSNames(os_name, os_variants):
1362
  """Calculates all the names an OS can be called, according to its variants.
1363

1364
  @type os_name: string
1365
  @param os_name: base name of the os
1366
  @type os_variants: list or None
1367
  @param os_variants: list of supported variants
1368
  @rtype: list
1369
  @return: list of valid names
1370

1371
  """
1372
  if os_variants:
1373
    return ['%s+%s' % (os_name, v) for v in os_variants]
1374
  else:
1375
    return [os_name]
1376

    
1377

    
1378
def ParseFields(selected, default):
1379
  """Parses the values of "--field"-like options.
1380

1381
  @type selected: string or None
1382
  @param selected: User-selected options
1383
  @type default: list
1384
  @param default: Default fields
1385

1386
  """
1387
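  # Illustrative behaviour (field names are examples only):
  #   ParseFields(None, ["name", "status"])      -> ["name", "status"]
  #   ParseFields("+oper_ram", ["name"])         -> ["name", "oper_ram"]
  #   ParseFields("name,os", ["name", "status"]) -> ["name", "os"]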
  if selected is None:
1388
    return default
1389

    
1390
  if selected.startswith("+"):
1391
    return default + selected[1:].split(",")
1392

    
1393
  return selected.split(",")
1394

    
1395

    
1396
UsesRPC = rpc.RunWithRPC
1397

    
1398

    
1399
def AskUser(text, choices=None):
1400
  """Ask the user a question.
1401

1402
  @param text: the question to ask
1403

1404
  @param choices: list of tuples (input_char, return_value,
1405
      description); if not given, it will default to: [('y', True,
1406
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1407
      note that the '?' char is reserved for help
1408

1409
  @return: one of the return values from the choices list; if input is
1410
      not possible (i.e. not running with a tty), we return the last
1411
      entry from the list
1412

1413
  """
1414
  if choices is None:
1415
    choices = [('y', True, 'Perform the operation'),
1416
               ('n', False, 'Do not perform the operation')]
1417
  if not choices or not isinstance(choices, list):
1418
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1419
  for entry in choices:
1420
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1421
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1422

    
1423
  answer = choices[-1][1]
1424
  new_text = []
1425
  for line in text.splitlines():
1426
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1427
  text = "\n".join(new_text)
1428
  try:
1429
    f = file("/dev/tty", "a+")
1430
  except IOError:
1431
    return answer
1432
  try:
1433
    chars = [entry[0] for entry in choices]
1434
    chars[-1] = "[%s]" % chars[-1]
1435
    chars.append('?')
1436
    maps = dict([(entry[0], entry[1]) for entry in choices])
1437
    while True:
1438
      f.write(text)
1439
      f.write('\n')
1440
      f.write("/".join(chars))
1441
      f.write(": ")
1442
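      # Read at most two characters: the answer character plus, possibly, its
      # trailing newline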
      line = f.readline(2).strip().lower()
1443
      if line in maps:
1444
        answer = maps[line]
1445
        break
1446
      elif line == '?':
1447
        for entry in choices:
1448
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1449
        f.write("\n")
1450
        continue
1451
  finally:
1452
    f.close()
1453
  return answer
1454

    
1455

    
1456
class JobSubmittedException(Exception):
1457
  """Job was submitted, client should exit.
1458

1459
  This exception has one argument, the ID of the job that was
1460
  submitted. The handler should print this ID.
1461

1462
  This is not an error, just a structured way to exit from clients.
1463

1464
  """
1465

    
1466

    
1467
def SendJob(ops, cl=None):
1468
  """Function to submit an opcode without waiting for the results.
1469

1470
  @type ops: list
1471
  @param ops: list of opcodes
1472
  @type cl: luxi.Client
1473
  @param cl: the luxi client to use for communicating with the master;
1474
             if None, a new client will be created
1475

1476
  """
1477
  if cl is None:
1478
    cl = GetClient()
1479

    
1480
  job_id = cl.SubmitJob(ops)
1481

    
1482
  return job_id
1483

    
1484

    
1485
def GenericPollJob(job_id, cbs, report_cbs):
1486
  """Generic job-polling function.
1487

1488
  @type job_id: number
1489
  @param job_id: Job ID
1490
  @type cbs: Instance of L{JobPollCbBase}
1491
  @param cbs: Data callbacks
1492
  @type report_cbs: Instance of L{JobPollReportCbBase}
1493
  @param report_cbs: Reporting callbacks
1494

1495
  """
1496
  prev_job_info = None
1497
  prev_logmsg_serial = None
1498

    
1499
  status = None
1500

    
1501
  while True:
1502
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1503
                                      prev_logmsg_serial)
1504
    if not result:
1505
      # job not found, go away!
1506
      raise errors.JobLost("Job with id %s lost" % job_id)
1507

    
1508
    if result == constants.JOB_NOTCHANGED:
1509
      report_cbs.ReportNotChanged(job_id, status)
1510

    
1511
      # Wait again
1512
      continue
1513

    
1514
    # Split result, a tuple of (field values, log entries)
1515
    (job_info, log_entries) = result
1516
    (status, ) = job_info
1517

    
1518
    if log_entries:
1519
      for log_entry in log_entries:
1520
        (serial, timestamp, log_type, message) = log_entry
1521
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1522
                                    log_type, message)
1523
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1524

    
1525
    # TODO: Handle canceled and archived jobs
1526
    elif status in (constants.JOB_STATUS_SUCCESS,
1527
                    constants.JOB_STATUS_ERROR,
1528
                    constants.JOB_STATUS_CANCELING,
1529
                    constants.JOB_STATUS_CANCELED):
1530
      break
1531

    
1532
    prev_job_info = job_info
1533

    
1534
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1535
  if not jobs:
1536
    raise errors.JobLost("Job with id %s lost" % job_id)
1537

    
1538
  status, opstatus, result = jobs[0]
1539

    
1540
  if status == constants.JOB_STATUS_SUCCESS:
1541
    return result
1542

    
1543
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1544
    raise errors.OpExecError("Job was canceled")
1545

    
1546
  has_ok = False
1547
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1548
    if status == constants.OP_STATUS_SUCCESS:
1549
      has_ok = True
1550
    elif status == constants.OP_STATUS_ERROR:
1551
      errors.MaybeRaise(msg)
1552

    
1553
      if has_ok:
1554
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1555
                                 (idx, msg))
1556

    
1557
      raise errors.OpExecError(str(msg))
1558

    
1559
  # default failure mode
1560
  raise errors.OpExecError(result)
1561

    
1562

    
1563
class JobPollCbBase:
1564
  """Base class for L{GenericPollJob} callbacks.
1565

1566
  """
1567
  def __init__(self):
1568
    """Initializes this class.
1569

1570
    """
1571

    
1572
  def WaitForJobChangeOnce(self, job_id, fields,
1573
                           prev_job_info, prev_log_serial):
1574
    """Waits for changes on a job.
1575

1576
    """
1577
    raise NotImplementedError()
1578

    
1579
  def QueryJobs(self, job_ids, fields):
1580
    """Returns the selected fields for the selected job IDs.
1581

1582
    @type job_ids: list of numbers
1583
    @param job_ids: Job IDs
1584
    @type fields: list of strings
1585
    @param fields: Fields
1586

1587
    """
1588
    raise NotImplementedError()
1589

    
1590

    
1591
class JobPollReportCbBase:
1592
  """Base class for L{GenericPollJob} reporting callbacks.
1593

1594
  """
1595
  def __init__(self):
1596
    """Initializes this class.
1597

1598
    """
1599

    
1600
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1601
    """Handles a log message.
1602

1603
    """
1604
    raise NotImplementedError()
1605

    
1606
  def ReportNotChanged(self, job_id, status):
1607
    """Called if a job hasn't changed in a while.
1608

1609
    @type job_id: number
1610
    @param job_id: Job ID
1611
    @type status: string or None
1612
    @param status: Job status if available
1613

1614
    """
1615
    raise NotImplementedError()
1616

    
1617

    
1618
class _LuxiJobPollCb(JobPollCbBase):
1619
  def __init__(self, cl):
1620
    """Initializes this class.
1621

1622
    """
1623
    JobPollCbBase.__init__(self)
1624
    self.cl = cl
1625

    
1626
  def WaitForJobChangeOnce(self, job_id, fields,
1627
                           prev_job_info, prev_log_serial):
1628
    """Waits for changes on a job.
1629

1630
    """
1631
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1632
                                        prev_job_info, prev_log_serial)
1633

    
1634
  def QueryJobs(self, job_ids, fields):
1635
    """Returns the selected fields for the selected job IDs.
1636

1637
    """
1638
    return self.cl.QueryJobs(job_ids, fields)
1639

    
1640

    
1641
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1642
  def __init__(self, feedback_fn):
1643
    """Initializes this class.
1644

1645
    """
1646
    JobPollReportCbBase.__init__(self)
1647

    
1648
    self.feedback_fn = feedback_fn
1649

    
1650
    assert callable(feedback_fn)
1651

    
1652
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1653
    """Handles a log message.
1654

1655
    """
1656
    self.feedback_fn((timestamp, log_type, log_msg))
1657

    
1658
  def ReportNotChanged(self, job_id, status):
1659
    """Called if a job hasn't changed in a while.
1660

1661
    """
1662
    # Ignore
1663

    
1664

    
1665
class StdioJobPollReportCb(JobPollReportCbBase):
1666
  def __init__(self):
1667
    """Initializes this class.
1668

1669
    """
1670
    JobPollReportCbBase.__init__(self)
1671

    
1672
    self.notified_queued = False
1673
    self.notified_waitlock = False
1674

    
1675
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1676
    """Handles a log message.
1677

1678
    """
1679
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1680
             FormatLogMessage(log_type, log_msg))
1681

    
1682
  def ReportNotChanged(self, job_id, status):
1683
    """Called if a job hasn't changed in a while.
1684

1685
    """
1686
    if status is None:
1687
      return
1688

    
1689
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1690
      ToStderr("Job %s is waiting in queue", job_id)
1691
      self.notified_queued = True
1692

    
1693
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1694
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1695
      self.notified_waitlock = True
1696

    
1697

    
1698
def FormatLogMessage(log_type, log_msg):
1699
  """Formats a job message according to its type.
1700

1701
  """
1702
  if log_type != constants.ELOG_MESSAGE:
1703
    log_msg = str(log_msg)
1704

    
1705
  return utils.SafeEncode(log_msg)
1706

    
1707

    
1708
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1709
  """Function to poll for the result of a job.
1710

1711
  @type job_id: job identifier
1712
  @param job_id: the job to poll for results
1713
  @type cl: luxi.Client
1714
  @param cl: the luxi client to use for communicating with the master;
1715
             if None, a new client will be created
1716

1717
  """
1718
  if cl is None:
1719
    cl = GetClient()
1720

    
1721
  if reporter is None:
1722
    if feedback_fn:
1723
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1724
    else:
1725
      reporter = StdioJobPollReportCb()
1726
  elif feedback_fn:
1727
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1728

    
1729
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1730

    
1731

    
1732
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1733
  """Legacy function to submit an opcode.
1734

1735
  This is just a simple wrapper over the construction of the processor
1736
  instance. It should be extended to better handle feedback and
1737
  interaction functions.
1738

1739
  """
1740
  if cl is None:
1741
    cl = GetClient()
1742

    
1743
  SetGenericOpcodeOpts([op], opts)
1744

    
1745
  job_id = SendJob([op], cl=cl)
1746

    
1747
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1748
                       reporter=reporter)
1749

    
1750
  return op_results[0]
1751

    
1752

    
1753
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1754
  """Wrapper around SubmitOpCode or SendJob.
1755

1756
  This function will decide, based on the 'opts' parameter, whether to
1757
  submit and wait for the result of the opcode (and return it), or
1758
  whether to just send the job and print its identifier. It is used in
1759
  order to simplify the implementation of the '--submit' option.
1760

1761
  It will also process the opcodes if we're sending them via SendJob
1762
  (otherwise SubmitOpCode does it).
1763

1764
  """
1765
  if opts and opts.submit_only:
1766
    job = [op]
1767
    SetGenericOpcodeOpts(job, opts)
1768
    job_id = SendJob(job, cl=cl)
1769
    raise JobSubmittedException(job_id)
1770
  else:
1771
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1772

    
1773

    
1774
def SetGenericOpcodeOpts(opcode_list, options):
1775
  """Processor for generic options.
1776

1777
  This function updates the given opcodes based on generic command
1778
  line options (like debug, dry-run, etc.).
1779

1780
  @param opcode_list: list of opcodes
1781
  @param options: command line options or None
1782
  @return: None (in-place modification)
1783

1784
  """
1785
  if not options:
1786
    return
1787
  for op in opcode_list:
1788
    op.debug_level = options.debug
1789
    if hasattr(options, "dry_run"):
1790
      op.dry_run = options.dry_run
1791
    if getattr(options, "priority", None) is not None:
1792
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1793

    
1794

    
1795
def GetClient():
1796
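  """Returns a luxi client for talking to the master daemon.

  """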
  # TODO: Cache object?
1797
  try:
1798
    client = luxi.Client()
1799
  except luxi.NoMasterError:
1800
    ss = ssconf.SimpleStore()
1801

    
1802
    # Try to read ssconf file
1803
    try:
1804
      ss.GetMasterNode()
1805
    except errors.ConfigurationError:
1806
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1807
                                 " not part of a cluster")
1808

    
1809
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1810
    if master != myself:
1811
      raise errors.OpPrereqError("This is not the master node, please connect"
1812
                                 " to node '%s' and rerun the command" %
1813
                                 master)
1814
    raise
1815
  return client
1816

    
1817

    
1818
def FormatError(err):
1819
  """Return a formatted error message for a given error.
1820

1821
  This function takes an exception instance and returns a tuple
1822
  consisting of two values: first, the recommended exit code, and
1823
  second, a string describing the error message (not
1824
  newline-terminated).
1825

1826
  """
1827
  retcode = 1
1828
  obuf = StringIO()
1829
  msg = str(err)
1830
  if isinstance(err, errors.ConfigurationError):
1831
    txt = "Corrupt configuration file: %s" % msg
1832
    logging.error(txt)
1833
    obuf.write(txt + "\n")
1834
    obuf.write("Aborting.")
1835
    retcode = 2
1836
  elif isinstance(err, errors.HooksAbort):
1837
    obuf.write("Failure: hooks execution failed:\n")
1838
    for node, script, out in err.args[0]:
1839
      if out:
1840
        obuf.write("  node: %s, script: %s, output: %s\n" %
1841
                   (node, script, out))
1842
      else:
1843
        obuf.write("  node: %s, script: %s (no output)\n" %
1844
                   (node, script))
1845
  elif isinstance(err, errors.HooksFailure):
1846
    obuf.write("Failure: hooks general failure: %s" % msg)
1847
  elif isinstance(err, errors.ResolverError):
1848
    this_host = netutils.Hostname.GetSysName()
1849
    if err.args[0] == this_host:
1850
      msg = "Failure: can't resolve my own hostname ('%s')"
1851
    else:
1852
      msg = "Failure: can't resolve hostname '%s'"
1853
    obuf.write(msg % err.args[0])
1854
  elif isinstance(err, errors.OpPrereqError):
1855
    if len(err.args) == 2:
1856
      obuf.write("Failure: prerequisites not met for this"
1857
               " operation:\nerror type: %s, error details:\n%s" %
1858
                 (err.args[1], err.args[0]))
1859
    else:
1860
      obuf.write("Failure: prerequisites not met for this"
1861
                 " operation:\n%s" % msg)
1862
  elif isinstance(err, errors.OpExecError):
1863
    obuf.write("Failure: command execution error:\n%s" % msg)
1864
  elif isinstance(err, errors.TagError):
1865
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1866
  elif isinstance(err, errors.JobQueueDrainError):
1867
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1868
               " accept new requests\n")
1869
  elif isinstance(err, errors.JobQueueFull):
1870
    obuf.write("Failure: the job queue is full and doesn't accept new"
1871
               " job submissions until old jobs are archived\n")
1872
  elif isinstance(err, errors.TypeEnforcementError):
1873
    obuf.write("Parameter Error: %s" % msg)
1874
  elif isinstance(err, errors.ParameterError):
1875
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1876
  elif isinstance(err, luxi.NoMasterError):
1877
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1878
               " and listening for connections?")
1879
  elif isinstance(err, luxi.TimeoutError):
1880
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1881
               " been submitted and will continue to run even if the call"
1882
               " timed out. Useful commands in this situation are \"gnt-job"
1883
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1884
    obuf.write(msg)
1885
  elif isinstance(err, luxi.PermissionError):
1886
    obuf.write("It seems you don't have permissions to connect to the"
1887
               " master daemon.\nPlease retry as a different user.")
1888
  elif isinstance(err, luxi.ProtocolError):
1889
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1890
               "%s" % msg)
1891
  elif isinstance(err, errors.JobLost):
1892
    obuf.write("Error checking job status: %s" % msg)
1893
  elif isinstance(err, errors.GenericError):
1894
    obuf.write("Unhandled Ganeti error: %s" % msg)
1895
  elif isinstance(err, JobSubmittedException):
1896
    obuf.write("JobID: %s\n" % err.args[0])
1897
    retcode = 0
1898
  else:
1899
    obuf.write("Unhandled exception: %s" % msg)
1900
  return retcode, obuf.getvalue().rstrip('\n')
1901

    
1902

    
1903
def GenericMain(commands, override=None, aliases=None):
1904
  """Generic main function for all the gnt-* commands.
1905

1906
  Arguments:
1907
    - commands: a dictionary with a special structure, see the design doc
1908
                for command line handling.
1909
    - override: if not None, we expect a dictionary with keys that will
1910
                override command line options; this can be used to pass
1911
                options from the scripts to generic functions
1912
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1913

1914
  """
1915
  # save the program name and the entire command line for later logging
1916
  if sys.argv:
1917
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1918
    if len(sys.argv) >= 2:
1919
      binary += " " + sys.argv[1]
1920
      old_cmdline = " ".join(sys.argv[2:])
1921
    else:
1922
      old_cmdline = ""
1923
  else:
1924
    binary = "<unknown program>"
1925
    old_cmdline = ""
1926

    
1927
  if aliases is None:
1928
    aliases = {}
1929

    
1930
  try:
1931
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1932
  except errors.ParameterError, err:
1933
    result, err_msg = FormatError(err)
1934
    ToStderr(err_msg)
1935
    return 1
1936

    
1937
  if func is None: # parse error
1938
    return 1
1939

    
1940
  if override is not None:
1941
    for key, val in override.iteritems():
1942
      setattr(options, key, val)
1943

    
1944
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
1945
                     stderr_logging=True)
1946

    
1947
  if old_cmdline:
1948
    logging.info("run with arguments '%s'", old_cmdline)
1949
  else:
1950
    logging.info("run with no arguments")
1951

    
1952
  try:
1953
    result = func(options, args)
1954
  except (errors.GenericError, luxi.ProtocolError,
1955
          JobSubmittedException), err:
1956
    result, err_msg = FormatError(err)
1957
    logging.exception("Error during command processing")
1958
    ToStderr(err_msg)
1959
  except KeyboardInterrupt:
1960
    result = constants.EXIT_FAILURE
1961
    ToStderr("Aborted. Note that if the operation created any jobs, they"
1962
             " might have been submitted and"
1963
             " will continue to run in the background.")
1964

    
1965
  return result
1966

    
1967

    
1968
def ParseNicOption(optvalue):
1969
  """Parses the value of the --net option(s).
1970

1971
  """
1972
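  # Illustrative shape of the input (values are hypothetical): a list of
  # (index, dict) pairs such as [("0", {...}), ("2", {...})] yields a list of
  # three dicts, where indices that were not specified stay empty dicts.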
  try:
1973
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1974
  except (TypeError, ValueError), err:
1975
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1976

    
1977
  nics = [{}] * nic_max
1978
  for nidx, ndict in optvalue:
1979
    nidx = int(nidx)
1980

    
1981
    if not isinstance(ndict, dict):
1982
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1983
                                 " got %s" % (nidx, ndict))
1984

    
1985
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1986

    
1987
    nics[nidx] = ndict
1988

    
1989
  return nics
1990

    
1991

    
1992
def GenericInstanceCreate(mode, opts, args):
1993
  """Add an instance to the cluster via either creation or import.
1994

1995
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1996
  @param opts: the command line options selected by the user
1997
  @type args: list
1998
  @param args: should contain only one element, the new instance name
1999
  @rtype: int
2000
  @return: the desired exit code
2001

2002
  """
2003
  instance = args[0]
2004

    
2005
  (pnode, snode) = SplitNodeOption(opts.node)
2006

    
2007
  hypervisor = None
2008
  hvparams = {}
2009
  if opts.hypervisor:
2010
    hypervisor, hvparams = opts.hypervisor
2011

    
2012
  if opts.nics:
2013
    nics = ParseNicOption(opts.nics)
2014
  elif opts.no_nics:
2015
    # no nics
2016
    nics = []
2017
  elif mode == constants.INSTANCE_CREATE:
2018
    # default of one nic, all auto
2019
    nics = [{}]
2020
  else:
2021
    # mode == import
2022
    nics = []
2023

    
2024
  if opts.disk_template == constants.DT_DISKLESS:
2025
    if opts.disks or opts.sd_size is not None:
2026
      raise errors.OpPrereqError("Diskless instance but disk"
2027
                                 " information passed")
2028
    disks = []
2029
  else:
2030
    if (not opts.disks and not opts.sd_size
2031
        and mode == constants.INSTANCE_CREATE):
2032
      raise errors.OpPrereqError("No disk information specified")
2033
    if opts.disks and opts.sd_size is not None:
2034
      raise errors.OpPrereqError("Please use either the '--disk' or"
2035
                                 " '-s' option")
2036
    if opts.sd_size is not None:
2037
      opts.disks = [(0, {"size": opts.sd_size})]
2038

    
2039
    if opts.disks:
2040
      try:
2041
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2042
      except ValueError, err:
2043
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2044
      disks = [{}] * disk_max
2045
    else:
2046
      disks = []
2047
    for didx, ddict in opts.disks:
2048
      didx = int(didx)
2049
      if not isinstance(ddict, dict):
2050
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2051
        raise errors.OpPrereqError(msg)
2052
      elif "size" in ddict:
2053
        if "adopt" in ddict:
2054
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2055
                                     " (disk %d)" % didx)
2056
        try:
2057
          ddict["size"] = utils.ParseUnit(ddict["size"])
2058
        except ValueError, err:
2059
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2060
                                     (didx, err))
2061
      elif "adopt" in ddict:
2062
        if mode == constants.INSTANCE_IMPORT:
2063
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2064
                                     " import")
2065
        ddict["size"] = 0
2066
      else:
2067
        raise errors.OpPrereqError("Missing size or adoption source for"
2068
                                   " disk %d" % didx)
2069
      disks[didx] = ddict
2070

    
2071
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2072
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2073

    
2074
  if mode == constants.INSTANCE_CREATE:
2075
    start = opts.start
2076
    os_type = opts.os
2077
    force_variant = opts.force_variant
2078
    src_node = None
2079
    src_path = None
2080
    no_install = opts.no_install
2081
    identify_defaults = False
2082
  elif mode == constants.INSTANCE_IMPORT:
2083
    start = False
2084
    os_type = None
2085
    force_variant = False
2086
    src_node = opts.src_node
2087
    src_path = opts.src_dir
2088
    no_install = None
2089
    identify_defaults = opts.identify_defaults
2090
  else:
2091
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2092

    
2093
  op = opcodes.OpInstanceCreate(instance_name=instance,
2094
                                disks=disks,
2095
                                disk_template=opts.disk_template,
2096
                                nics=nics,
2097
                                pnode=pnode, snode=snode,
2098
                                ip_check=opts.ip_check,
2099
                                name_check=opts.name_check,
2100
                                wait_for_sync=opts.wait_for_sync,
2101
                                file_storage_dir=opts.file_storage_dir,
2102
                                file_driver=opts.file_driver,
2103
                                iallocator=opts.iallocator,
2104
                                hypervisor=hypervisor,
2105
                                hvparams=hvparams,
2106
                                beparams=opts.beparams,
2107
                                osparams=opts.osparams,
2108
                                mode=mode,
2109
                                start=start,
2110
                                os_type=os_type,
2111
                                force_variant=force_variant,
2112
                                src_node=src_node,
2113
                                src_path=src_path,
2114
                                no_install=no_install,
2115
                                identify_defaults=identify_defaults)
2116

    
2117
  SubmitOrSend(op, opts)
2118
  return 0
2119

    
2120

    
2121
class _RunWhileClusterStoppedHelper:
2122
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2123

2124
  """
2125
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2126
    """Initializes this class.
2127

2128
    @type feedback_fn: callable
2129
    @param feedback_fn: Feedback function
2130
    @type cluster_name: string
2131
    @param cluster_name: Cluster name
2132
    @type master_node: string
2133
    @param master_node: Master node name
2134
    @type online_nodes: list
2135
    @param online_nodes: List of names of online nodes
2136

2137
    """
2138
    self.feedback_fn = feedback_fn
2139
    self.cluster_name = cluster_name
2140
    self.master_node = master_node
2141
    self.online_nodes = online_nodes
2142

    
2143
    self.ssh = ssh.SshRunner(self.cluster_name)
2144

    
2145
    self.nonmaster_nodes = [name for name in online_nodes
2146
                            if name != master_node]
2147

    
2148
    assert self.master_node not in self.nonmaster_nodes
2149

    
2150
  def _RunCmd(self, node_name, cmd):
2151
    """Runs a command on the local or a remote machine.
2152

2153
    @type node_name: string
2154
    @param node_name: Machine name
2155
    @type cmd: list
2156
    @param cmd: Command
2157

2158
    """
2159
    if node_name is None or node_name == self.master_node:
2160
      # No need to use SSH
2161
      result = utils.RunCmd(cmd)
2162
    else:
2163
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2164

    
2165
    if result.failed:
2166
      errmsg = ["Failed to run command %s" % result.cmd]
2167
      if node_name:
2168
        errmsg.append("on node %s" % node_name)
2169
      errmsg.append(": exitcode %s and error %s" %
2170
                    (result.exit_code, result.output))
2171
      raise errors.OpExecError(" ".join(errmsg))
2172

    
2173
  def Call(self, fn, *args):
2174
    """Call function while all daemons are stopped.
2175

2176
    @type fn: callable
2177
    @param fn: Function to be called
2178

2179
    """
2180
    # Pause watcher by acquiring an exclusive lock on watcher state file
2181
    self.feedback_fn("Blocking watcher")
2182
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2183
    try:
2184
      # TODO: Currently, this just blocks. There's no timeout.
2185
      # TODO: Should it be a shared lock?
2186
      watcher_block.Exclusive(blocking=True)
2187

    
2188
      # Stop master daemons, so that no new jobs can come in and all running
2189
      # ones are finished
2190
      self.feedback_fn("Stopping master daemons")
2191
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2192
      try:
2193
        # Stop daemons on all nodes
2194
        for node_name in self.online_nodes:
2195
          self.feedback_fn("Stopping daemons on %s" % node_name)
2196
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2197

    
2198
        # All daemons are shut down now
2199
        try:
2200
          return fn(self, *args)
2201
        except Exception, err:
2202
          _, errmsg = FormatError(err)
2203
          logging.exception("Caught exception")
2204
          self.feedback_fn(errmsg)
2205
          raise
2206
      finally:
2207
        # Start cluster again, master node last
2208
        for node_name in self.nonmaster_nodes + [self.master_node]:
2209
          self.feedback_fn("Starting daemons on %s" % node_name)
2210
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2211
    finally:
2212
      # Resume watcher
2213
      watcher_block.Close()
2214

    
2215

    
2216
def RunWhileClusterStopped(feedback_fn, fn, *args):
2217
  """Calls a function while all cluster daemons are stopped.
2218

2219
  @type feedback_fn: callable
2220
  @param feedback_fn: Feedback function
2221
  @type fn: callable
2222
  @param fn: Function to be called when daemons are stopped
2223

2224
  """
2225
  feedback_fn("Gathering cluster information")
2226

    
2227
  # This ensures we're running on the master daemon
2228
  cl = GetClient()
2229

    
2230
  (cluster_name, master_node) = \
2231
    cl.QueryConfigValues(["cluster_name", "master_node"])
2232

    
2233
  online_nodes = GetOnlineNodes([], cl=cl)
2234

    
2235
  # Don't keep a reference to the client. The master daemon will go away.
2236
  del cl
2237

    
2238
  assert master_node in online_nodes
2239

    
2240
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2241
                                       online_nodes).Call(fn, *args)
2242

    
2243

    
2244
def GenerateTable(headers, fields, separator, data,
2245
                  numfields=None, unitfields=None,
2246
                  units=None):
2247
  """Formats a table with headers and different fields.
2248

2249
  @type headers: dict
2250
  @param headers: dictionary mapping field names to headers for
2251
      the table
2252
  @type fields: list
2253
  @param fields: the field names corresponding to each row in
2254
      the data field
2255
  @param separator: the separator to be used; if this is None,
2256
      the default 'smart' algorithm is used which computes optimal
2257
      field width, otherwise just the separator is used between
2258
      each field
2259
  @type data: list
2260
  @param data: a list of lists, each sublist being one row to be output
2261
  @type numfields: list
2262
  @param numfields: a list with the fields that hold numeric
2263
      values and thus should be right-aligned
2264
  @type unitfields: list
2265
  @param unitfields: a list with the fields that hold numeric
2266
      values that should be formatted with the units field
2267
  @type units: string or None
2268
  @param units: the units we should use for formatting, or None for
2269
      automatic choice (human-readable for non-separator usage, otherwise
2270
      megabytes); this is a one-letter string
2271

2272
  """
2273
  if units is None:
2274
    if separator:
2275
      units = "m"
2276
    else:
2277
      units = "h"
2278

    
2279
  if numfields is None:
2280
    numfields = []
2281
  if unitfields is None:
2282
    unitfields = []
2283

    
2284
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2285
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2286

    
2287
  format_fields = []
2288
  for field in fields:
2289
    if headers and field not in headers:
2290
      # TODO: handle better unknown fields (either revert to old
2291
      # style of raising exception, or deal more intelligently with
2292
      # variable fields)
2293
      headers[field] = field
2294
    if separator is not None:
2295
      format_fields.append("%s")
2296
    elif numfields.Matches(field):
2297
      format_fields.append("%*s")
2298
    else:
2299
      format_fields.append("%-*s")
2300

    
2301
  if separator is None:
2302
    mlens = [0 for name in fields]
2303
    format_str = ' '.join(format_fields)
2304
  else:
2305
    format_str = separator.replace("%", "%%").join(format_fields)
2306

    
2307
  for row in data:
2308
    if row is None:
2309
      continue
2310
    for idx, val in enumerate(row):
2311
      if unitfields.Matches(fields[idx]):
2312
        try:
2313
          val = int(val)
2314
        except (TypeError, ValueError):
2315
          pass
2316
        else:
2317
          val = row[idx] = utils.FormatUnit(val, units)
2318
      val = row[idx] = str(val)
2319
      if separator is None:
2320
        mlens[idx] = max(mlens[idx], len(val))
2321

    
2322
  result = []
2323
  if headers:
2324
    args = []
2325
    for idx, name in enumerate(fields):
2326
      hdr = headers[name]
2327
      if separator is None:
2328
        mlens[idx] = max(mlens[idx], len(hdr))
2329
        args.append(mlens[idx])
2330
      args.append(hdr)
2331
    result.append(format_str % tuple(args))
2332

    
2333
  if separator is None:
2334
    assert len(mlens) == len(fields)
2335

    
2336
    if fields and not numfields.Matches(fields[-1]):
2337
      mlens[-1] = 0
2338

    
2339
  for line in data:
2340
    args = []
2341
    if line is None:
2342
      line = ['-' for _ in fields]
2343
    for idx in range(len(fields)):
2344
      if separator is None:
2345
        args.append(mlens[idx])
2346
      args.append(line[idx])
2347
    result.append(format_str % tuple(args))
2348

    
2349
  return result
2350

    
2351

    
2352
def _FormatBool(value):
2353
  """Formats a boolean value as a string.
2354

2355
  """
2356
  if value:
2357
    return "Y"
2358
  return "N"
2359

    
2360

    
2361
#: Default formatting for query results; (callback, align right)
2362
_DEFAULT_FORMAT_QUERY = {
2363
  constants.QFT_TEXT: (str, False),
2364
  constants.QFT_BOOL: (_FormatBool, False),
2365
  constants.QFT_NUMBER: (str, True),
2366
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2367
  constants.QFT_OTHER: (str, False),
2368
  constants.QFT_UNKNOWN: (str, False),
2369
  }
2370

    
2371

    
2372
def _GetColumnFormatter(fdef, override, unit):
2373
  """Returns formatting function for a field.
2374

2375
  @type fdef: L{objects.QueryFieldDefinition}
2376
  @type override: dict
2377
  @param override: Dictionary for overriding field formatting functions,
2378
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2379
  @type unit: string
2380
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2381
  @rtype: tuple; (callable, bool)
2382
  @return: Returns the function to format a value (takes one parameter) and a
2383
    boolean for aligning the value on the right-hand side
2384

2385
  """
2386
  fmt = override.get(fdef.name, None)
2387
  if fmt is not None:
2388
    return fmt
2389

    
2390
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2391

    
2392
  if fdef.kind == constants.QFT_UNIT:
2393
    # Can't keep this information in the static dictionary
2394
    return (lambda value: utils.FormatUnit(value, unit), True)
2395

    
2396
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2397
  if fmt is not None:
2398
    return fmt
2399

    
2400
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2401

    
2402

    
2403
class _QueryColumnFormatter:
2404
  """Callable class for formatting fields of a query.
2405

2406
  """
2407
  def __init__(self, fn, status_fn, verbose):
2408
    """Initializes this class.
2409

2410
    @type fn: callable
2411
    @param fn: Formatting function
2412
    @type status_fn: callable
2413
    @param status_fn: Function to report fields' status
2414
    @type verbose: boolean
2415
    @param verbose: whether to use verbose field descriptions or not
2416

2417
    """
2418
    self._fn = fn
2419
    self._status_fn = status_fn
2420
    self._verbose = verbose
2421

    
2422
  def __call__(self, data):
2423
    """Returns a field's string representation.
2424

2425
    """
2426
    (status, value) = data
2427

    
2428
    # Report status
2429
    self._status_fn(status)
2430

    
2431
    if status == constants.RS_NORMAL:
2432
      return self._fn(value)
2433

    
2434
    assert value is None, \
2435
           "Found value %r for abnormal status %s" % (value, status)
2436

    
2437
    return FormatResultError(status, self._verbose)
2438

    
2439

    
2440
def FormatResultError(status, verbose):
2441
  """Formats result status other than L{constants.RS_NORMAL}.
2442

2443
  @param status: The result status
2444
  @type verbose: boolean
2445
  @param verbose: Whether to return the verbose text
2446
  @return: Text of result status
2447

2448
  """
2449
  assert status != constants.RS_NORMAL, \
2450
         "FormatResultError called with status equal to constants.RS_NORMAL"
2451
  try:
2452
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2453
  except KeyError:
2454
    raise NotImplementedError("Unknown status %s" % status)
2455
  else:
2456
    if verbose:
2457
      return verbose_text
2458
    return normal_text
2459

    
2460

    
2461
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2462
                      header=False, verbose=False):
2463
  """Formats data in L{objects.QueryResponse}.
2464

2465
  @type result: L{objects.QueryResponse}
2466
  @param result: result of query operation
2467
  @type unit: string
2468
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2469
    see L{utils.text.FormatUnit}
2470
  @type format_override: dict
2471
  @param format_override: Dictionary for overriding field formatting functions,
2472
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2473
  @type separator: string or None
2474
  @param separator: String used to separate fields
2475
  @type header: bool
2476
  @param header: Whether to output header row
2477
  @type verbose: boolean
2478
  @param verbose: whether to use verbose field descriptions or not
2479

2480
  """
2481
  if unit is None:
2482
    if separator:
2483
      unit = "m"
2484
    else:
2485
      unit = "h"
2486

    
2487
  if format_override is None:
2488
    format_override = {}
2489

    
2490
  stats = dict.fromkeys(constants.RS_ALL, 0)
2491

    
2492
  def _RecordStatus(status):
2493
    if status in stats:
2494
      stats[status] += 1
2495

    
2496
  columns = []
2497
  for fdef in result.fields:
2498
    assert fdef.title and fdef.name
2499
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2500
    columns.append(TableColumn(fdef.title,
2501
                               _QueryColumnFormatter(fn, _RecordStatus,
2502
                                                     verbose),
2503
                               align_right))
2504

    
2505
  table = FormatTable(result.data, columns, header, separator)
2506

    
2507
  # Collect statistics
2508
  assert len(stats) == len(constants.RS_ALL)
2509
  assert compat.all(count >= 0 for count in stats.values())
2510

    
2511
  # Determine overall status. If there was no data, unknown fields must be
2512
  # detected via the field definitions.
2513
  if (stats[constants.RS_UNKNOWN] or
2514
      (not result.data and _GetUnknownFields(result.fields))):
2515
    status = QR_UNKNOWN
2516
  elif compat.any(count > 0 for key, count in stats.items()
2517
                  if key != constants.RS_NORMAL):
2518
    status = QR_INCOMPLETE
2519
  else:
2520
    status = QR_NORMAL
2521

    
2522
  return (status, table)
2523

    
2524

    
2525
def _GetUnknownFields(fdefs):
2526
  """Returns list of unknown fields included in C{fdefs}.
2527

2528
  @type fdefs: list of L{objects.QueryFieldDefinition}
2529

2530
  """
2531
  return [fdef for fdef in fdefs
2532
          if fdef.kind == constants.QFT_UNKNOWN]
2533

    
2534

    
2535
def _WarnUnknownFields(fdefs):
2536
  """Prints a warning to stderr if a query included unknown fields.
2537

2538
  @type fdefs: list of L{objects.QueryFieldDefinition}
2539

2540
  """
2541
  unknown = _GetUnknownFields(fdefs)
2542
  if unknown:
2543
    ToStderr("Warning: Queried for unknown fields %s",
2544
             utils.CommaJoin(fdef.name for fdef in unknown))
2545
    return True
2546

    
2547
  return False
2548

    
2549

    
2550
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2551
                format_override=None, verbose=False):
2552
  """Generic implementation for listing all items of a resource.
2553

2554
  @param resource: One of L{constants.QR_OP_LUXI}
2555
  @type fields: list of strings
2556
  @param fields: List of fields to query for
2557
  @type names: list of strings
2558
  @param names: Names of items to query for
2559
  @type unit: string or None
2560
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2561
    None for automatic choice (human-readable for non-separator usage,
2562
    otherwise megabytes); this is a one-letter string
2563
  @type separator: string or None
2564
  @param separator: String used to separate fields
2565
  @type header: bool
2566
  @param header: Whether to show header row
2567
  @type format_override: dict
2568
  @param format_override: Dictionary for overriding field formatting functions,
2569
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2570
  @type verbose: boolean
2571
  @param verbose: whether to use verbose field descriptions or not
2572

2573
  """
2574
  if cl is None:
2575
    cl = GetClient()
2576

    
2577
  if not names:
2578
    names = None
2579

    
2580
  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
2581

    
2582
  found_unknown = _WarnUnknownFields(response.fields)
2583

    
2584
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2585
                                     header=header,
2586
                                     format_override=format_override,
2587
                                     verbose=verbose)
2588

    
2589
  for line in data:
2590
    ToStdout(line)
2591

    
2592
  assert ((found_unknown and status == QR_UNKNOWN) or
2593
          (not found_unknown and status != QR_UNKNOWN))
2594

    
2595
  if status == QR_UNKNOWN:
2596
    return constants.EXIT_UNKNOWN_FIELD
2597

    
2598
  # TODO: Should the list command fail if not all data could be collected?
2599
  return constants.EXIT_SUCCESS
2600

    
2601

    
2602
def GenericListFields(resource, fields, separator, header, cl=None):
2603
  """Generic implementation for listing fields for a resource.
2604

2605
  @param resource: One of L{constants.QR_OP_LUXI}
2606
  @type fields: list of strings
2607
  @param fields: List of fields to query for
2608
  @type separator: string or None
2609
  @param separator: String used to separate fields
2610
  @type header: bool
2611
  @param header: Whether to show header row
2612

2613
  """
2614
  if cl is None:
2615
    cl = GetClient()
2616

    
2617
  if not fields:
2618
    fields = None
2619

    
2620
  response = cl.QueryFields(resource, fields)
2621

    
2622
  found_unknown = _WarnUnknownFields(response.fields)
2623

    
2624
  columns = [
2625
    TableColumn("Name", str, False),
2626
    TableColumn("Title", str, False),
2627
    TableColumn("Description", str, False),
2628
    ]
2629

    
2630
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2631

    
2632
  for line in FormatTable(rows, columns, header, separator):
2633
    ToStdout(line)
2634

    
2635
  if found_unknown:
2636
    return constants.EXIT_UNKNOWN_FIELD
2637

    
2638
  return constants.EXIT_SUCCESS
2639

    
2640

    
2641
class TableColumn:
2642
  """Describes a column for L{FormatTable}.
2643

2644
  """
2645
  def __init__(self, title, fn, align_right):
2646
    """Initializes this class.
2647

2648
    @type title: string
2649
    @param title: Column title
2650
    @type fn: callable
2651
    @param fn: Formatting function
2652
    @type align_right: bool
2653
    @param align_right: Whether to align values on the right-hand side
2654

2655
    """
2656
    self.title = title
2657
    self.format = fn
2658
    self.align_right = align_right
2659

    
2660

    
2661
def _GetColFormatString(width, align_right):
2662
  """Returns the format string for a field.
2663

2664
  """
2665
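  # For example, a width of 8 yields "%8s" when aligning right, "%-8s" otherwise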
  if align_right:
2666
    sign = ""
2667
  else:
2668
    sign = "-"
2669

    
2670
  return "%%%s%ss" % (sign, width)
2671

    
2672

    
2673
def FormatTable(rows, columns, header, separator):
2674
  """Formats data as a table.
2675

2676
  @type rows: list of lists
2677
  @param rows: Row data, one list per row
2678
  @type columns: list of L{TableColumn}
2679
  @param columns: Column descriptions
2680
  @type header: bool
2681
  @param header: Whether to show header row
2682
  @type separator: string or None
2683
  @param separator: String used to separate columns
2684

2685
  """
2686
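  # Illustrative call (data and column names are hypothetical):
  #   FormatTable([["a", 1], ["bb", 22]],
  #               [TableColumn("Name", str, False), TableColumn("N", str, True)],
  #               True, None)
  #     -> ["Name  N", "a     1", "bb   22"]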
  if header:
2687
    data = [[col.title for col in columns]]
2688
    colwidth = [len(col.title) for col in columns]
2689
  else:
2690
    data = []
2691
    colwidth = [0 for _ in columns]
2692

    
2693
  # Format row data
2694
  for row in rows:
2695
    assert len(row) == len(columns)
2696

    
2697
    formatted = [col.format(value) for value, col in zip(row, columns)]
2698

    
2699
    if separator is None:
2700
      # Update column widths
2701
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2702
        # Modifying a list's items while iterating is fine
2703
        colwidth[idx] = max(oldwidth, len(value))
2704

    
2705
    data.append(formatted)
2706

    
2707
  if separator is not None:
2708
    # Return early if a separator is used
2709
    return [separator.join(row) for row in data]
2710

    
2711
  if columns and not columns[-1].align_right:
2712
    # Avoid unnecessary spaces at end of line
2713
    colwidth[-1] = 0
2714

    
2715
  # Build format string
2716
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2717
                  for col, width in zip(columns, colwidth)])
2718

    
2719
  return [fmt % tuple(row) for row in data]
2720

    
2721

    
2722
def FormatTimestamp(ts):
2723
  """Formats a given timestamp.
2724

2725
  @type ts: timestamp
2726
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2727

2728
  @rtype: string
2729
  @return: a string with the formatted timestamp
2730

2731
  """
2732
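  # Illustrative result (the rendered date depends on the local timezone):
  #   FormatTimestamp((1325376000, 123456)) -> "2012-01-01 00:00:00.123456"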
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2733
    return '?'
2734
  sec, usec = ts
2735
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2736

    
2737

    
2738
def ParseTimespec(value):
2739
  """Parse a time specification.
2740

2741
  The following suffixes will be recognized:
2742

2743
    - s: seconds
2744
    - m: minutes
2745
    - h: hours
2746
    - d: day
2747
    - w: weeks
2748

2749
  Without any suffix, the value will be taken to be in seconds.
2750

2751
  """
2752
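  # Illustrative conversions: "30" -> 30, "2h" -> 7200, "1w" -> 604800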
  value = str(value)
2753
  if not value:
2754
    raise errors.OpPrereqError("Empty time specification passed")
2755
  suffix_map = {
2756
    's': 1,
2757
    'm': 60,
2758
    'h': 3600,
2759
    'd': 86400,
2760
    'w': 604800,
2761
    }
2762
  if value[-1] not in suffix_map:
2763
    try:
2764
      value = int(value)
2765
    except (TypeError, ValueError):
2766
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2767
  else:
2768
    multiplier = suffix_map[value[-1]]
2769
    value = value[:-1]
2770
    if not value: # no data left after stripping the suffix
2771
      raise errors.OpPrereqError("Invalid time specification (only"
2772
                                 " suffix passed)")
2773
    try:
2774
      value = int(value) * multiplier
2775
    except (TypeError, ValueError):
2776
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2777
  return value
2778

    
2779

    
2780
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2781
                   filter_master=False):
2782
  """Returns the names of online nodes.
2783

2784
  This function will also log a warning on stderr with the names of
2785
  the offline nodes that are skipped.
2786

2787
  @param nodes: if not empty, use only this subset of nodes (minus the
2788
      offline ones)
2789
  @param cl: if not None, luxi client to use
2790
  @type nowarn: boolean
2791
  @param nowarn: by default, this function will output a note with the
2792
      offline nodes that are skipped; if this parameter is True the
2793
      note is not displayed
2794
  @type secondary_ips: boolean
2795
  @param secondary_ips: if True, return the secondary IPs instead of the
2796
      names, useful for doing network traffic over the replication interface
2797
      (if any)
2798
  @type filter_master: boolean
2799
  @param filter_master: if True, do not return the master node in the list
2800
      (useful in coordination with secondary_ips where we cannot check our
2801
      node name against the list)
2802

2803
  """
2804
  if cl is None:
2805
    cl = GetClient()
2806

    
2807
  if secondary_ips:
2808
    name_idx = 2
2809
  else:
2810
    name_idx = 0
2811

    
2812
  if filter_master:
2813
    master_node = cl.QueryConfigValues(["master_node"])[0]
2814
    filter_fn = lambda x: x != master_node
2815
  else:
2816
    filter_fn = lambda _: True
2817

    
2818
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2819
                         use_locking=False)
2820
  offline = [row[0] for row in result if row[1]]
2821
  if offline and not nowarn:
2822
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2823
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2824

    
2825

    
2826
def _ToStream(stream, txt, *args):
2827
  """Write a message to a stream, bypassing the logging system
2828

2829
  @type stream: file object
2830
  @param stream: the file to which we should write
2831
  @type txt: str
2832
  @param txt: the message
2833

2834
  """
2835
  if args:
2836
    args = tuple(args)
2837
    stream.write(txt % args)
2838
  else:
2839
    stream.write(txt)
2840
  stream.write('\n')
2841
  stream.flush()
2842

    
2843

    
2844
def ToStdout(txt, *args):
2845
  """Write a message to stdout only, bypassing the logging system
2846

2847
  This is just a wrapper over _ToStream.
2848

2849
  @type txt: str
2850
  @param txt: the message
2851

2852
  """
2853
  _ToStream(sys.stdout, txt, *args)
2854

    
2855

    
2856
def ToStderr(txt, *args):
2857
  """Write a message to stderr only, bypassing the logging system
2858

2859
  This is just a wrapper over _ToStream.
2860

2861
  @type txt: str
2862
  @param txt: the message
2863

2864
  """
2865
  _ToStream(sys.stderr, txt, *args)
2866

    
2867

    
2868
class JobExecutor(object):
2869
  """Class which manages the submission and execution of multiple jobs.
2870

2871
  Note that instances of this class should not be reused between
2872
  GetResults() calls.
2873

2874
  """
2875
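  # Typical usage sketch (names are illustrative, not from this module):
  #   jex = JobExecutor(opts=opts)
  #   for (name, op) in pending_work:
  #     jex.QueueJob(name, op)
  #   results = jex.GetResults()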
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2876
    self.queue = []
2877
    if cl is None:
2878
      cl = GetClient()
2879
    self.cl = cl
2880
    self.verbose = verbose
2881
    self.jobs = []
2882
    self.opts = opts
2883
    self.feedback_fn = feedback_fn
2884

    
2885
  def QueueJob(self, name, *ops):
2886
    """Record a job for later submit.
2887

2888
    @type name: string
2889
    @param name: a description of the job, will be used in WaitJobSet
2890
    """
2891
    SetGenericOpcodeOpts(ops, self.opts)
2892
    self.queue.append((name, ops))
2893

    
2894
  def SubmitPending(self, each=False):
2895
    """Submit all pending jobs.
2896

2897
    """
2898
    if each:
2899
      results = []
2900
      for row in self.queue:
2901
        # SubmitJob will remove the success status, but raise an exception if
2902
        # the submission fails, so we'll notice that anyway.
2903
        results.append([True, self.cl.SubmitJob(row[1])])
2904
    else:
2905
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2906
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2907
                                                            self.queue)):
2908
      self.jobs.append((idx, status, data, name))
2909

    
2910
  def _ChooseJob(self):
2911
    """Choose a non-waiting/queued job to poll next.
2912

2913
    """
2914
    assert self.jobs, "_ChooseJob called with empty job list"
2915

    
2916
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2917
    assert result
2918

    
2919
    for job_data, status in zip(self.jobs, result):
2920
      if (isinstance(status, list) and status and
2921
          status[0] in (constants.JOB_STATUS_QUEUED,
2922
                        constants.JOB_STATUS_WAITLOCK,
2923
                        constants.JOB_STATUS_CANCELING)):
2924
        # job is still present and waiting
2925
        continue
2926
      # good candidate found (either running job or lost job)
2927
      self.jobs.remove(job_data)
2928
      return job_data
2929

    
2930
    # no job found
2931
    return self.jobs.pop(0)
2932

    
2933
  def GetResults(self):
2934
    """Wait for and return the results of all jobs.
2935

2936
    @rtype: list
2937
    @return: list of tuples (success, job results), in the same order
2938
        as the submitted jobs; if a job has failed, instead of the result
2939
        there will be the error message
2940

2941
    """
2942
    if not self.jobs:
2943
      self.SubmitPending()
2944
    results = []
2945
    if self.verbose:
2946
      ok_jobs = [row[2] for row in self.jobs if row[1]]
2947
      if ok_jobs:
2948
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2949

    
2950
    # first, remove any non-submitted jobs
2951
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2952
    for idx, _, jid, name in failures:
2953
      ToStderr("Failed to submit job for %s: %s", name, jid)
2954
      results.append((idx, False, jid))
2955

    
2956
    while self.jobs:
2957
      (idx, _, jid, name) = self._ChooseJob()
2958
      ToStdout("Waiting for job %s for %s...", jid, name)
2959
      try:
2960
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2961
        success = True
2962
      except errors.JobLost, err:
2963
        _, job_result = FormatError(err)
2964
        ToStderr("Job %s for %s has been archived, cannot check its result",
2965
                 jid, name)
2966
        success = False
2967
      except (errors.GenericError, luxi.ProtocolError), err:
2968
        _, job_result = FormatError(err)
2969
        success = False
2970
        # the error message will always be shown, verbose or not
2971
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2972

    
2973
      results.append((idx, success, job_result))
2974

    
2975
    # sort based on the index, then drop it
2976
    results.sort()
2977
    results = [i[1:] for i in results]
2978

    
2979
    return results
2980

    
2981
  def WaitOrShow(self, wait):
2982
    """Wait for job results or only print the job IDs.
2983

2984
    @type wait: boolean
2985
    @param wait: whether to wait or not
2986

2987
    """
2988
    if wait:
2989
      return self.GetResults()
2990
    else:
2991
      if not self.jobs:
2992
        self.SubmitPending()
2993
      for _, status, result, name in self.jobs:
2994
        if status:
2995
          ToStdout("%s: %s", result, name)
2996
        else:
2997
          ToStderr("Failure for %s: %s", name, result)
2998
      return [row[1:3] for row in self.jobs]
2999

    
3000

    
3001
def FormatParameterDict(buf, param_dict, actual, level=1):
3002
  """Formats a parameter dictionary.
3003

3004
  @type buf: L{StringIO}
3005
  @param buf: the buffer into which to write
3006
  @type param_dict: dict
3007
  @param param_dict: the own parameters
3008
  @type actual: dict
3009
  @param actual: the current parameter set (including defaults)
3010
  @param level: Level of indent
3011

3012
  """
3013
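  # Illustrative output (hypothetical parameters), for param_dict={"mem": 512}
  # and actual={"mem": 512, "vcpus": 1}, written into buf at level 1:
  #   - mem: 512
  #   - vcpus: default (1)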
  indent = "  " * level
3014
  for key in sorted(actual):
3015
    val = param_dict.get(key, "default (%s)" % actual[key])
3016
    buf.write("%s- %s: %s\n" % (indent, key, val))
3017

    
3018

    
3019
def ConfirmOperation(names, list_type, text, extra=""):
3020
  """Ask the user to confirm an operation on a list of list_type.
3021

3022
  This function is used to request confirmation for doing an operation
3023
  on a given list of list_type.
3024

3025
  @type names: list
3026
  @param names: the list of names that we display when
3027
      we ask for confirmation
3028
  @type list_type: str
3029
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3030
  @type text: str
3031
  @param text: the operation that the user should confirm
3032
  @rtype: boolean
3033
  @return: True or False depending on user's confirmation.
3034

3035
  """
3036
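  # Illustrative call (names are hypothetical):
  #   ConfirmOperation(["inst1", "inst2"], "instances", "shutdown")
  # prompts "The shutdown will operate on 2 instances.", asks whether to
  # continue and lists the affected instances.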
  count = len(names)
3037
  msg = ("The %s will operate on %d %s.\n%s"
3038
         "Do you want to continue?" % (text, count, list_type, extra))
3039
  affected = (("\nAffected %s:\n" % list_type) +
3040
              "\n".join(["  %s" % name for name in names]))
3041

    
3042
  choices = [("y", True, "Yes, execute the %s" % text),
3043
             ("n", False, "No, abort the %s" % text)]
3044

    
3045
  if count > 20:
3046
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3047
    question = msg
3048
  else:
3049
    question = msg + affected
3050

    
3051
  choice = AskUser(question, choices)
3052
  if choice == "v":
3053
    choices.pop(1)
3054
    choice = AskUser(msg + affected, choices)
3055
  return choice