
root / lib / cli.py @ cfcc79c6


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
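

# Illustrative note (added comment, not in the original file): with
# opts.tag_type set to constants.TAG_NODE and args == ["node1.example.com",
# "mytag"], the call returns (constants.TAG_NODE, "node1.example.com") and
# pops the name, leaving args == ["mytag"]; for constants.TAG_CLUSTER no
# name is consumed from args.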


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
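

# Illustrative example (added comment, not in the original file): a value
# such as "mem=512,no_acpi,-kernel_path" is parsed by _SplitKeyVal into
# {"mem": "512", "acpi": False, "kernel_path": None}: plain key=val pairs
# keep their string value, the "no_" prefix maps to False and the "-"
# prefix to None.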


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
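

# Illustrative example (added comment, not in the original file): for a
# value such as "xen-pvm:root_path=/dev/xvda1,no_acpi" this returns
# ("xen-pvm", {"root_path": "/dev/xvda1", "acpi": False}), while a bare
# "no_ident" value yields ("ident", False), marking the whole parameter
# group for removal.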


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
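

# Illustrative example (added comment; assumes constants.VALUE_TRUE and
# constants.VALUE_FALSE are the strings "true" and "false"):
# check_bool(None, "--offline", "Yes") returns True, "no" or "false" return
# False, and any other string raises errors.ParameterError.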


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance"
                            " MAC addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
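

# Illustrative example (added comment, not in the original file): for a
# command declared with args_def=[ArgInstance(min=1, max=None)],
# _CheckArguments accepts any non-empty argument list ("instance1",
# "instance2", ...) and prints an error and returns False when no argument
# is given.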


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
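

# Illustrative example (added comment, not in the original file):
# SplitNodeOption("node1.example.com:node2.example.com") returns the list
# ["node1.example.com", "node2.example.com"] (from str.split), while
# SplitNodeOption("node1.example.com") returns ("node1.example.com", None).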


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
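

# Illustrative example (added comment, not in the original file):
# CalculateOSNames("debootstrap", ["wheezy", "squeeze"]) returns
# ["debootstrap+wheezy", "debootstrap+squeeze"], while a variant-less OS
# simply yields ["debootstrap"].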


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
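

# Illustrative example (added comment, not in the original file): with
# default fields ["name", "status"], ParseFields(None, default) returns the
# default unchanged, ParseFields("+oper_ram", default) appends to it, and
# ParseFields("name,oper_ram", default) replaces it entirely.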


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
1426
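# Illustrative usage (not part of the original module). A custom choices list
# maps single characters to arbitrary return values; the last entry doubles
# as the default answer when no tty is available:
#
#   choice = AskUser("Really wipe the disks?",
#                    choices=[("y", True, "Wipe the disks"),
#                             ("n", False, "Keep the disks"),
#                             ("a", "abort", "Abort the whole operation")])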

    
1427

    
1428
class JobSubmittedException(Exception):
1429
  """Job was submitted, client should exit.
1430

1431
  This exception has one argument, the ID of the job that was
1432
  submitted. The handler should print this ID.
1433

1434
  This is not an error, just a structured way to exit from clients.
1435

1436
  """
1437

    
1438

    
1439
def SendJob(ops, cl=None):
1440
  """Function to submit an opcode without waiting for the results.
1441

1442
  @type ops: list
1443
  @param ops: list of opcodes
1444
  @type cl: luxi.Client
1445
  @param cl: the luxi client to use for communicating with the master;
1446
             if None, a new client will be created
1447

1448
  """
1449
  if cl is None:
1450
    cl = GetClient()
1451

    
1452
  job_id = cl.SubmitJob(ops)
1453

    
1454
  return job_id
1455
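# Illustrative usage (not part of the original module): "op" stands for any
# opcodes.OpCode instance built by the caller; the call returns immediately
# with the job ID instead of waiting for the job to finish:
#
#   job_id = SendJob([op])
#   ToStdout("Submitted job %s", job_id)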

    
1456

    
1457
def GenericPollJob(job_id, cbs, report_cbs):
1458
  """Generic job-polling function.
1459

1460
  @type job_id: number
1461
  @param job_id: Job ID
1462
  @type cbs: Instance of L{JobPollCbBase}
1463
  @param cbs: Data callbacks
1464
  @type report_cbs: Instance of L{JobPollReportCbBase}
1465
  @param report_cbs: Reporting callbacks
1466

1467
  """
1468
  prev_job_info = None
1469
  prev_logmsg_serial = None
1470

    
1471
  status = None
1472

    
1473
  while True:
1474
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1475
                                      prev_logmsg_serial)
1476
    if not result:
1477
      # job not found, go away!
1478
      raise errors.JobLost("Job with id %s lost" % job_id)
1479

    
1480
    if result == constants.JOB_NOTCHANGED:
1481
      report_cbs.ReportNotChanged(job_id, status)
1482

    
1483
      # Wait again
1484
      continue
1485

    
1486
    # Split result, a tuple of (field values, log entries)
1487
    (job_info, log_entries) = result
1488
    (status, ) = job_info
1489

    
1490
    if log_entries:
1491
      for log_entry in log_entries:
1492
        (serial, timestamp, log_type, message) = log_entry
1493
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1494
                                    log_type, message)
1495
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1496

    
1497
    # TODO: Handle canceled and archived jobs
1498
    elif status in (constants.JOB_STATUS_SUCCESS,
1499
                    constants.JOB_STATUS_ERROR,
1500
                    constants.JOB_STATUS_CANCELING,
1501
                    constants.JOB_STATUS_CANCELED):
1502
      break
1503

    
1504
    prev_job_info = job_info
1505

    
1506
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1507
  if not jobs:
1508
    raise errors.JobLost("Job with id %s lost" % job_id)
1509

    
1510
  status, opstatus, result = jobs[0]
1511

    
1512
  if status == constants.JOB_STATUS_SUCCESS:
1513
    return result
1514

    
1515
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1516
    raise errors.OpExecError("Job was canceled")
1517

    
1518
  has_ok = False
1519
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1520
    if status == constants.OP_STATUS_SUCCESS:
1521
      has_ok = True
1522
    elif status == constants.OP_STATUS_ERROR:
1523
      errors.MaybeRaise(msg)
1524

    
1525
      if has_ok:
1526
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1527
                                 (idx, msg))
1528

    
1529
      raise errors.OpExecError(str(msg))
1530

    
1531
  # default failure mode
1532
  raise errors.OpExecError(result)
1533

    
1534

    
1535
class JobPollCbBase:
1536
  """Base class for L{GenericPollJob} callbacks.
1537

1538
  """
1539
  def __init__(self):
1540
    """Initializes this class.
1541

1542
    """
1543

    
1544
  def WaitForJobChangeOnce(self, job_id, fields,
1545
                           prev_job_info, prev_log_serial):
1546
    """Waits for changes on a job.
1547

1548
    """
1549
    raise NotImplementedError()
1550

    
1551
  def QueryJobs(self, job_ids, fields):
1552
    """Returns the selected fields for the selected job IDs.
1553

1554
    @type job_ids: list of numbers
1555
    @param job_ids: Job IDs
1556
    @type fields: list of strings
1557
    @param fields: Fields
1558

1559
    """
1560
    raise NotImplementedError()
1561

    
1562

    
1563
class JobPollReportCbBase:
1564
  """Base class for L{GenericPollJob} reporting callbacks.
1565

1566
  """
1567
  def __init__(self):
1568
    """Initializes this class.
1569

1570
    """
1571

    
1572
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1573
    """Handles a log message.
1574

1575
    """
1576
    raise NotImplementedError()
1577

    
1578
  def ReportNotChanged(self, job_id, status):
1579
    """Called for if a job hasn't changed in a while.
1580

1581
    @type job_id: number
1582
    @param job_id: Job ID
1583
    @type status: string or None
1584
    @param status: Job status if available
1585

1586
    """
1587
    raise NotImplementedError()
1588

    
1589

    
1590
class _LuxiJobPollCb(JobPollCbBase):
1591
  def __init__(self, cl):
1592
    """Initializes this class.
1593

1594
    """
1595
    JobPollCbBase.__init__(self)
1596
    self.cl = cl
1597

    
1598
  def WaitForJobChangeOnce(self, job_id, fields,
1599
                           prev_job_info, prev_log_serial):
1600
    """Waits for changes on a job.
1601

1602
    """
1603
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1604
                                        prev_job_info, prev_log_serial)
1605

    
1606
  def QueryJobs(self, job_ids, fields):
1607
    """Returns the selected fields for the selected job IDs.
1608

1609
    """
1610
    return self.cl.QueryJobs(job_ids, fields)
1611

    
1612

    
1613
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1614
  def __init__(self, feedback_fn):
1615
    """Initializes this class.
1616

1617
    """
1618
    JobPollReportCbBase.__init__(self)
1619

    
1620
    self.feedback_fn = feedback_fn
1621

    
1622
    assert callable(feedback_fn)
1623

    
1624
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1625
    """Handles a log message.
1626

1627
    """
1628
    self.feedback_fn((timestamp, log_type, log_msg))
1629

    
1630
  def ReportNotChanged(self, job_id, status):
1631
    """Called if a job hasn't changed in a while.
1632

1633
    """
1634
    # Ignore
1635

    
1636

    
1637
class StdioJobPollReportCb(JobPollReportCbBase):
1638
  def __init__(self):
1639
    """Initializes this class.
1640

1641
    """
1642
    JobPollReportCbBase.__init__(self)
1643

    
1644
    self.notified_queued = False
1645
    self.notified_waitlock = False
1646

    
1647
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1648
    """Handles a log message.
1649

1650
    """
1651
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1652
             FormatLogMessage(log_type, log_msg))
1653

    
1654
  def ReportNotChanged(self, job_id, status):
1655
    """Called if a job hasn't changed in a while.
1656

1657
    """
1658
    if status is None:
1659
      return
1660

    
1661
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1662
      ToStderr("Job %s is waiting in queue", job_id)
1663
      self.notified_queued = True
1664

    
1665
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1666
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1667
      self.notified_waitlock = True
1668

    
1669

    
1670
def FormatLogMessage(log_type, log_msg):
1671
  """Formats a job message according to its type.
1672

1673
  """
1674
  if log_type != constants.ELOG_MESSAGE:
1675
    log_msg = str(log_msg)
1676

    
1677
  return utils.SafeEncode(log_msg)
1678

    
1679

    
1680
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1681
  """Function to poll for the result of a job.
1682

1683
  @type job_id: job identifier
1684
  @param job_id: the job to poll for results
1685
  @type cl: luxi.Client
1686
  @param cl: the luxi client to use for communicating with the master;
1687
             if None, a new client will be created
1688

1689
  """
1690
  if cl is None:
1691
    cl = GetClient()
1692

    
1693
  if reporter is None:
1694
    if feedback_fn:
1695
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1696
    else:
1697
      reporter = StdioJobPollReportCb()
1698
  elif feedback_fn:
1699
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1700

    
1701
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1702
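# Illustrative usage (not part of the original module), continuing the
# SendJob sketch above; without a feedback_fn or reporter, log messages are
# printed on stdout via StdioJobPollReportCb:
#
#   results = PollJob(job_id)   # list with one result per opcode in the job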

    
1703

    
1704
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1705
  """Legacy function to submit an opcode.
1706

1707
  This is just a simple wrapper over the construction of the processor
1708
  instance. It should be extended to better handle feedback and
1709
  interaction functions.
1710

1711
  """
1712
  if cl is None:
1713
    cl = GetClient()
1714

    
1715
  SetGenericOpcodeOpts([op], opts)
1716

    
1717
  job_id = SendJob([op], cl=cl)
1718

    
1719
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1720
                       reporter=reporter)
1721

    
1722
  return op_results[0]
1723

    
1724

    
1725
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1726
  """Wrapper around SubmitOpCode or SendJob.
1727

1728
  This function will decide, based on the 'opts' parameter, whether to
1729
  submit and wait for the result of the opcode (and return it), or
1730
  whether to just send the job and print its identifier. It is used in
1731
  order to simplify the implementation of the '--submit' option.
1732

1733
  It will also process the opcodes if we're sending them via SendJob
1734
  (otherwise SubmitOpCode does it).
1735

1736
  """
1737
  if opts and opts.submit_only:
1738
    job = [op]
1739
    SetGenericOpcodeOpts(job, opts)
1740
    job_id = SendJob(job, cl=cl)
1741
    raise JobSubmittedException(job_id)
1742
  else:
1743
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1744
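# Illustrative usage (not part of the original module): with "--submit" set
# (opts.submit_only), the job ID is reported through JobSubmittedException
# and formatted by FormatError/GenericMain; otherwise the call blocks until
# the opcode finishes:
#
#   return SubmitOrSend(op, opts)   # typical tail of a gnt-* command function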

    
1745

    
1746
def SetGenericOpcodeOpts(opcode_list, options):
1747
  """Processor for generic options.
1748

1749
  This function updates the given opcodes based on generic command
1750
  line options (like debug, dry-run, etc.).
1751

1752
  @param opcode_list: list of opcodes
1753
  @param options: command line options or None
1754
  @return: None (in-place modification)
1755

1756
  """
1757
  if not options:
1758
    return
1759
  for op in opcode_list:
1760
    op.debug_level = options.debug
1761
    if hasattr(options, "dry_run"):
1762
      op.dry_run = options.dry_run
1763
    if getattr(options, "priority", None) is not None:
1764
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1765

    
1766

    
1767
def GetClient():
1768
  # TODO: Cache object?
1769
  try:
1770
    client = luxi.Client()
1771
  except luxi.NoMasterError:
1772
    ss = ssconf.SimpleStore()
1773

    
1774
    # Try to read ssconf file
1775
    try:
1776
      ss.GetMasterNode()
1777
    except errors.ConfigurationError:
1778
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1779
                                 " not part of a cluster")
1780

    
1781
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1782
    if master != myself:
1783
      raise errors.OpPrereqError("This is not the master node, please connect"
1784
                                 " to node '%s' and rerun the command" %
1785
                                 master)
1786
    raise
1787
  return client
1788

    
1789

    
1790
def FormatError(err):
1791
  """Return a formatted error message for a given error.
1792

1793
  This function takes an exception instance and returns a tuple
1794
  consisting of two values: first, the recommended exit code, and
1795
  second, a string describing the error message (not
1796
  newline-terminated).
1797

1798
  """
1799
  retcode = 1
1800
  obuf = StringIO()
1801
  msg = str(err)
1802
  if isinstance(err, errors.ConfigurationError):
1803
    txt = "Corrupt configuration file: %s" % msg
1804
    logging.error(txt)
1805
    obuf.write(txt + "\n")
1806
    obuf.write("Aborting.")
1807
    retcode = 2
1808
  elif isinstance(err, errors.HooksAbort):
1809
    obuf.write("Failure: hooks execution failed:\n")
1810
    for node, script, out in err.args[0]:
1811
      if out:
1812
        obuf.write("  node: %s, script: %s, output: %s\n" %
1813
                   (node, script, out))
1814
      else:
1815
        obuf.write("  node: %s, script: %s (no output)\n" %
1816
                   (node, script))
1817
  elif isinstance(err, errors.HooksFailure):
1818
    obuf.write("Failure: hooks general failure: %s" % msg)
1819
  elif isinstance(err, errors.ResolverError):
1820
    this_host = netutils.Hostname.GetSysName()
1821
    if err.args[0] == this_host:
1822
      msg = "Failure: can't resolve my own hostname ('%s')"
1823
    else:
1824
      msg = "Failure: can't resolve hostname '%s'"
1825
    obuf.write(msg % err.args[0])
1826
  elif isinstance(err, errors.OpPrereqError):
1827
    if len(err.args) == 2:
1828
      obuf.write("Failure: prerequisites not met for this"
1829
               " operation:\nerror type: %s, error details:\n%s" %
1830
                 (err.args[1], err.args[0]))
1831
    else:
1832
      obuf.write("Failure: prerequisites not met for this"
1833
                 " operation:\n%s" % msg)
1834
  elif isinstance(err, errors.OpExecError):
1835
    obuf.write("Failure: command execution error:\n%s" % msg)
1836
  elif isinstance(err, errors.TagError):
1837
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1838
  elif isinstance(err, errors.JobQueueDrainError):
1839
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1840
               " accept new requests\n")
1841
  elif isinstance(err, errors.JobQueueFull):
1842
    obuf.write("Failure: the job queue is full and doesn't accept new"
1843
               " job submissions until old jobs are archived\n")
1844
  elif isinstance(err, errors.TypeEnforcementError):
1845
    obuf.write("Parameter Error: %s" % msg)
1846
  elif isinstance(err, errors.ParameterError):
1847
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1848
  elif isinstance(err, luxi.NoMasterError):
1849
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1850
               " and listening for connections?")
1851
  elif isinstance(err, luxi.TimeoutError):
1852
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1853
               " been submitted and will continue to run even if the call"
1854
               " timed out. Useful commands in this situation are \"gnt-job"
1855
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1856
    obuf.write(msg)
1857
  elif isinstance(err, luxi.PermissionError):
1858
    obuf.write("It seems you don't have permissions to connect to the"
1859
               " master daemon.\nPlease retry as a different user.")
1860
  elif isinstance(err, luxi.ProtocolError):
1861
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1862
               "%s" % msg)
1863
  elif isinstance(err, errors.JobLost):
1864
    obuf.write("Error checking job status: %s" % msg)
1865
  elif isinstance(err, errors.GenericError):
1866
    obuf.write("Unhandled Ganeti error: %s" % msg)
1867
  elif isinstance(err, JobSubmittedException):
1868
    obuf.write("JobID: %s\n" % err.args[0])
1869
    retcode = 0
1870
  else:
1871
    obuf.write("Unhandled exception: %s" % msg)
1872
  return retcode, obuf.getvalue().rstrip('\n')
1873

    
1874

    
1875
def GenericMain(commands, override=None, aliases=None):
1876
  """Generic main function for all the gnt-* commands.
1877

1878
  Arguments:
1879
    - commands: a dictionary with a special structure, see the design doc
1880
                for command line handling.
1881
    - override: if not None, we expect a dictionary with keys that will
1882
                override command line options; this can be used to pass
1883
                options from the scripts to generic functions
1884
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1885

1886
  """
1887
  # save the program name and the entire command line for later logging
1888
  if sys.argv:
1889
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1890
    if len(sys.argv) >= 2:
1891
      binary += " " + sys.argv[1]
1892
      old_cmdline = " ".join(sys.argv[2:])
1893
    else:
1894
      old_cmdline = ""
1895
  else:
1896
    binary = "<unknown program>"
1897
    old_cmdline = ""
1898

    
1899
  if aliases is None:
1900
    aliases = {}
1901

    
1902
  try:
1903
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1904
  except errors.ParameterError, err:
1905
    result, err_msg = FormatError(err)
1906
    ToStderr(err_msg)
1907
    return 1
1908

    
1909
  if func is None: # parse error
1910
    return 1
1911

    
1912
  if override is not None:
1913
    for key, val in override.iteritems():
1914
      setattr(options, key, val)
1915

    
1916
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
1917
                     stderr_logging=True)
1918

    
1919
  if old_cmdline:
1920
    logging.info("run with arguments '%s'", old_cmdline)
1921
  else:
1922
    logging.info("run with no arguments")
1923

    
1924
  try:
1925
    result = func(options, args)
1926
  except (errors.GenericError, luxi.ProtocolError,
1927
          JobSubmittedException), err:
1928
    result, err_msg = FormatError(err)
1929
    logging.exception("Error during command processing")
1930
    ToStderr(err_msg)
1931

    
1932
  return result
1933

    
1934

    
1935
def ParseNicOption(optvalue):
1936
  """Parses the value of the --net option(s).
1937

1938
  """
1939
  try:
1940
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1941
  except (TypeError, ValueError), err:
1942
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1943

    
1944
  nics = [{}] * nic_max
1945
  for nidx, ndict in optvalue:
1946
    nidx = int(nidx)
1947

    
1948
    if not isinstance(ndict, dict):
1949
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1950
                                 " got %s" % (nidx, ndict))
1951

    
1952
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1953

    
1954
    nics[nidx] = ndict
1955

    
1956
  return nics
1957
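# Illustrative usage (not part of the original module); parameter names must
# be valid NIC parameters (see constants.INIC_PARAMS_TYPES), the values below
# are examples only. Missing indices are left as empty dicts:
#
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#   # -> [{'link': 'br0'}, {}, {'mode': 'routed'}]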

    
1958

    
1959
def GenericInstanceCreate(mode, opts, args):
1960
  """Add an instance to the cluster via either creation or import.
1961

1962
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1963
  @param opts: the command line options selected by the user
1964
  @type args: list
1965
  @param args: should contain only one element, the new instance name
1966
  @rtype: int
1967
  @return: the desired exit code
1968

1969
  """
1970
  instance = args[0]
1971

    
1972
  (pnode, snode) = SplitNodeOption(opts.node)
1973

    
1974
  hypervisor = None
1975
  hvparams = {}
1976
  if opts.hypervisor:
1977
    hypervisor, hvparams = opts.hypervisor
1978

    
1979
  if opts.nics:
1980
    nics = ParseNicOption(opts.nics)
1981
  elif opts.no_nics:
1982
    # no nics
1983
    nics = []
1984
  elif mode == constants.INSTANCE_CREATE:
1985
    # default of one nic, all auto
1986
    nics = [{}]
1987
  else:
1988
    # mode == import
1989
    nics = []
1990

    
1991
  if opts.disk_template == constants.DT_DISKLESS:
1992
    if opts.disks or opts.sd_size is not None:
1993
      raise errors.OpPrereqError("Diskless instance but disk"
1994
                                 " information passed")
1995
    disks = []
1996
  else:
1997
    if (not opts.disks and not opts.sd_size
1998
        and mode == constants.INSTANCE_CREATE):
1999
      raise errors.OpPrereqError("No disk information specified")
2000
    if opts.disks and opts.sd_size is not None:
2001
      raise errors.OpPrereqError("Please use either the '--disk' or"
2002
                                 " '-s' option")
2003
    if opts.sd_size is not None:
2004
      opts.disks = [(0, {"size": opts.sd_size})]
2005

    
2006
    if opts.disks:
2007
      try:
2008
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2009
      except ValueError, err:
2010
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2011
      disks = [{}] * disk_max
2012
    else:
2013
      disks = []
2014
    for didx, ddict in opts.disks:
2015
      didx = int(didx)
2016
      if not isinstance(ddict, dict):
2017
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2018
        raise errors.OpPrereqError(msg)
2019
      elif "size" in ddict:
2020
        if "adopt" in ddict:
2021
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2022
                                     " (disk %d)" % didx)
2023
        try:
2024
          ddict["size"] = utils.ParseUnit(ddict["size"])
2025
        except ValueError, err:
2026
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2027
                                     (didx, err))
2028
      elif "adopt" in ddict:
2029
        if mode == constants.INSTANCE_IMPORT:
2030
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2031
                                     " import")
2032
        ddict["size"] = 0
2033
      else:
2034
        raise errors.OpPrereqError("Missing size or adoption source for"
2035
                                   " disk %d" % didx)
2036
      disks[didx] = ddict
2037

    
2038
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2039
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2040

    
2041
  if mode == constants.INSTANCE_CREATE:
2042
    start = opts.start
2043
    os_type = opts.os
2044
    force_variant = opts.force_variant
2045
    src_node = None
2046
    src_path = None
2047
    no_install = opts.no_install
2048
    identify_defaults = False
2049
  elif mode == constants.INSTANCE_IMPORT:
2050
    start = False
2051
    os_type = None
2052
    force_variant = False
2053
    src_node = opts.src_node
2054
    src_path = opts.src_dir
2055
    no_install = None
2056
    identify_defaults = opts.identify_defaults
2057
  else:
2058
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2059

    
2060
  op = opcodes.OpInstanceCreate(instance_name=instance,
2061
                                disks=disks,
2062
                                disk_template=opts.disk_template,
2063
                                nics=nics,
2064
                                pnode=pnode, snode=snode,
2065
                                ip_check=opts.ip_check,
2066
                                name_check=opts.name_check,
2067
                                wait_for_sync=opts.wait_for_sync,
2068
                                file_storage_dir=opts.file_storage_dir,
2069
                                file_driver=opts.file_driver,
2070
                                iallocator=opts.iallocator,
2071
                                hypervisor=hypervisor,
2072
                                hvparams=hvparams,
2073
                                beparams=opts.beparams,
2074
                                osparams=opts.osparams,
2075
                                mode=mode,
2076
                                start=start,
2077
                                os_type=os_type,
2078
                                force_variant=force_variant,
2079
                                src_node=src_node,
2080
                                src_path=src_path,
2081
                                no_install=no_install,
2082
                                identify_defaults=identify_defaults)
2083

    
2084
  SubmitOrSend(op, opts)
2085
  return 0
2086

    
2087

    
2088
class _RunWhileClusterStoppedHelper:
2089
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2090

2091
  """
2092
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2093
    """Initializes this class.
2094

2095
    @type feedback_fn: callable
2096
    @param feedback_fn: Feedback function
2097
    @type cluster_name: string
2098
    @param cluster_name: Cluster name
2099
    @type master_node: string
2100
    @param master_node: Master node name
2101
    @type online_nodes: list
2102
    @param online_nodes: List of names of online nodes
2103

2104
    """
2105
    self.feedback_fn = feedback_fn
2106
    self.cluster_name = cluster_name
2107
    self.master_node = master_node
2108
    self.online_nodes = online_nodes
2109

    
2110
    self.ssh = ssh.SshRunner(self.cluster_name)
2111

    
2112
    self.nonmaster_nodes = [name for name in online_nodes
2113
                            if name != master_node]
2114

    
2115
    assert self.master_node not in self.nonmaster_nodes
2116

    
2117
  def _RunCmd(self, node_name, cmd):
2118
    """Runs a command on the local or a remote machine.
2119

2120
    @type node_name: string
2121
    @param node_name: Machine name
2122
    @type cmd: list
2123
    @param cmd: Command
2124

2125
    """
2126
    if node_name is None or node_name == self.master_node:
2127
      # No need to use SSH
2128
      result = utils.RunCmd(cmd)
2129
    else:
2130
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2131

    
2132
    if result.failed:
2133
      errmsg = ["Failed to run command %s" % result.cmd]
2134
      if node_name:
2135
        errmsg.append("on node %s" % node_name)
2136
      errmsg.append(": exitcode %s and error %s" %
2137
                    (result.exit_code, result.output))
2138
      raise errors.OpExecError(" ".join(errmsg))
2139

    
2140
  def Call(self, fn, *args):
2141
    """Call function while all daemons are stopped.
2142

2143
    @type fn: callable
2144
    @param fn: Function to be called
2145

2146
    """
2147
    # Pause watcher by acquiring an exclusive lock on watcher state file
2148
    self.feedback_fn("Blocking watcher")
2149
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2150
    try:
2151
      # TODO: Currently, this just blocks. There's no timeout.
2152
      # TODO: Should it be a shared lock?
2153
      watcher_block.Exclusive(blocking=True)
2154

    
2155
      # Stop master daemons, so that no new jobs can come in and all running
2156
      # ones are finished
2157
      self.feedback_fn("Stopping master daemons")
2158
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2159
      try:
2160
        # Stop daemons on all nodes
2161
        for node_name in self.online_nodes:
2162
          self.feedback_fn("Stopping daemons on %s" % node_name)
2163
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2164

    
2165
        # All daemons are shut down now
2166
        try:
2167
          return fn(self, *args)
2168
        except Exception, err:
2169
          _, errmsg = FormatError(err)
2170
          logging.exception("Caught exception")
2171
          self.feedback_fn(errmsg)
2172
          raise
2173
      finally:
2174
        # Start cluster again, master node last
2175
        for node_name in self.nonmaster_nodes + [self.master_node]:
2176
          self.feedback_fn("Starting daemons on %s" % node_name)
2177
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2178
    finally:
2179
      # Resume watcher
2180
      watcher_block.Close()
2181

    
2182

    
2183
def RunWhileClusterStopped(feedback_fn, fn, *args):
2184
  """Calls a function while all cluster daemons are stopped.
2185

2186
  @type feedback_fn: callable
2187
  @param feedback_fn: Feedback function
2188
  @type fn: callable
2189
  @param fn: Function to be called when daemons are stopped
2190

2191
  """
2192
  feedback_fn("Gathering cluster information")
2193

    
2194
  # This ensures we're running on the master daemon
2195
  cl = GetClient()
2196

    
2197
  (cluster_name, master_node) = \
2198
    cl.QueryConfigValues(["cluster_name", "master_node"])
2199

    
2200
  online_nodes = GetOnlineNodes([], cl=cl)
2201

    
2202
  # Don't keep a reference to the client. The master daemon will go away.
2203
  del cl
2204

    
2205
  assert master_node in online_nodes
2206

    
2207
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2208
                                       online_nodes).Call(fn, *args)
2209

    
2210

    
2211
def GenerateTable(headers, fields, separator, data,
2212
                  numfields=None, unitfields=None,
2213
                  units=None):
2214
  """Prints a table with headers and different fields.
2215

2216
  @type headers: dict
2217
  @param headers: dictionary mapping field names to headers for
2218
      the table
2219
  @type fields: list
2220
  @param fields: the field names corresponding to each row in
2221
      the data field
2222
  @param separator: the separator to be used; if this is None,
2223
      the default 'smart' algorithm is used which computes optimal
2224
      field width, otherwise just the separator is used between
2225
      each field
2226
  @type data: list
2227
  @param data: a list of lists, each sublist being one row to be output
2228
  @type numfields: list
2229
  @param numfields: a list with the fields that hold numeric
2230
      values and thus should be right-aligned
2231
  @type unitfields: list
2232
  @param unitfields: a list with the fields that hold numeric
2233
      values that should be formatted with the units field
2234
  @type units: string or None
2235
  @param units: the units we should use for formatting, or None for
2236
      automatic choice (human-readable for non-separator usage, otherwise
2237
      megabytes); this is a one-letter string
2238

2239
  """
2240
  if units is None:
2241
    if separator:
2242
      units = "m"
2243
    else:
2244
      units = "h"
2245

    
2246
  if numfields is None:
2247
    numfields = []
2248
  if unitfields is None:
2249
    unitfields = []
2250

    
2251
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2252
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2253

    
2254
  format_fields = []
2255
  for field in fields:
2256
    if headers and field not in headers:
2257
      # TODO: handle better unknown fields (either revert to old
2258
      # style of raising exception, or deal more intelligently with
2259
      # variable fields)
2260
      headers[field] = field
2261
    if separator is not None:
2262
      format_fields.append("%s")
2263
    elif numfields.Matches(field):
2264
      format_fields.append("%*s")
2265
    else:
2266
      format_fields.append("%-*s")
2267

    
2268
  if separator is None:
2269
    mlens = [0 for name in fields]
2270
    format_str = ' '.join(format_fields)
2271
  else:
2272
    format_str = separator.replace("%", "%%").join(format_fields)
2273

    
2274
  for row in data:
2275
    if row is None:
2276
      continue
2277
    for idx, val in enumerate(row):
2278
      if unitfields.Matches(fields[idx]):
2279
        try:
2280
          val = int(val)
2281
        except (TypeError, ValueError):
2282
          pass
2283
        else:
2284
          val = row[idx] = utils.FormatUnit(val, units)
2285
      val = row[idx] = str(val)
2286
      if separator is None:
2287
        mlens[idx] = max(mlens[idx], len(val))
2288

    
2289
  result = []
2290
  if headers:
2291
    args = []
2292
    for idx, name in enumerate(fields):
2293
      hdr = headers[name]
2294
      if separator is None:
2295
        mlens[idx] = max(mlens[idx], len(hdr))
2296
        args.append(mlens[idx])
2297
      args.append(hdr)
2298
    result.append(format_str % tuple(args))
2299

    
2300
  if separator is None:
2301
    assert len(mlens) == len(fields)
2302

    
2303
    if fields and not numfields.Matches(fields[-1]):
2304
      mlens[-1] = 0
2305

    
2306
  for line in data:
2307
    args = []
2308
    if line is None:
2309
      line = ['-' for _ in fields]
2310
    for idx in range(len(fields)):
2311
      if separator is None:
2312
        args.append(mlens[idx])
2313
      args.append(line[idx])
2314
    result.append(format_str % tuple(args))
2315

    
2316
  return result
2317
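# Illustrative usage (not part of the original module); headers, fields and
# data are made-up values:
#
#   GenerateTable(headers={"name": "Node", "dfree": "DiskFree"},
#                 fields=["name", "dfree"],
#                 separator=None,
#                 data=[["node1", 102400], ["node2", 204800]],
#                 numfields=["dfree"], unitfields=["dfree"])
#   # -> list of formatted lines: one header line plus one line per row,
#   #    with "dfree" right-aligned and rendered via utils.FormatUnit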

    
2318

    
2319
def _FormatBool(value):
2320
  """Formats a boolean value as a string.
2321

2322
  """
2323
  if value:
2324
    return "Y"
2325
  return "N"
2326

    
2327

    
2328
#: Default formatting for query results; (callback, align right)
2329
_DEFAULT_FORMAT_QUERY = {
2330
  constants.QFT_TEXT: (str, False),
2331
  constants.QFT_BOOL: (_FormatBool, False),
2332
  constants.QFT_NUMBER: (str, True),
2333
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2334
  constants.QFT_OTHER: (str, False),
2335
  constants.QFT_UNKNOWN: (str, False),
2336
  }
2337

    
2338

    
2339
def _GetColumnFormatter(fdef, override, unit):
2340
  """Returns formatting function for a field.
2341

2342
  @type fdef: L{objects.QueryFieldDefinition}
2343
  @type override: dict
2344
  @param override: Dictionary for overriding field formatting functions,
2345
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2346
  @type unit: string
2347
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2348
  @rtype: tuple; (callable, bool)
2349
  @return: Returns the function to format a value (takes one parameter) and a
2350
    boolean for aligning the value on the right-hand side
2351

2352
  """
2353
  fmt = override.get(fdef.name, None)
2354
  if fmt is not None:
2355
    return fmt
2356

    
2357
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2358

    
2359
  if fdef.kind == constants.QFT_UNIT:
2360
    # Can't keep this information in the static dictionary
2361
    return (lambda value: utils.FormatUnit(value, unit), True)
2362

    
2363
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2364
  if fmt is not None:
2365
    return fmt
2366

    
2367
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2368

    
2369

    
2370
class _QueryColumnFormatter:
2371
  """Callable class for formatting fields of a query.
2372

2373
  """
2374
  def __init__(self, fn, status_fn):
2375
    """Initializes this class.
2376

2377
    @type fn: callable
2378
    @param fn: Formatting function
2379
    @type status_fn: callable
2380
    @param status_fn: Function to report fields' status
2381

2382
    """
2383
    self._fn = fn
2384
    self._status_fn = status_fn
2385

    
2386
  def __call__(self, data):
2387
    """Returns a field's string representation.
2388

2389
    """
2390
    (status, value) = data
2391

    
2392
    # Report status
2393
    self._status_fn(status)
2394

    
2395
    if status == constants.RS_NORMAL:
2396
      return self._fn(value)
2397

    
2398
    assert value is None, \
2399
           "Found value %r for abnormal status %s" % (value, status)
2400

    
2401
    if status == constants.RS_UNKNOWN:
2402
      return "(unknown)"
2403

    
2404
    if status == constants.RS_NODATA:
2405
      return "(nodata)"
2406

    
2407
    if status == constants.RS_UNAVAIL:
2408
      return "(unavail)"
2409

    
2410
    if status == constants.RS_OFFLINE:
2411
      return "(offline)"
2412

    
2413
    raise NotImplementedError("Unknown status %s" % status)
2414

    
2415

    
2416
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2417
                      header=False):
2418
  """Formats data in L{objects.QueryResponse}.
2419

2420
  @type result: L{objects.QueryResponse}
2421
  @param result: result of query operation
2422
  @type unit: string
2423
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2424
    see L{utils.text.FormatUnit}
2425
  @type format_override: dict
2426
  @param format_override: Dictionary for overriding field formatting functions,
2427
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2428
  @type separator: string or None
2429
  @param separator: String used to separate fields
2430
  @type header: bool
2431
  @param header: Whether to output header row
2432

2433
  """
2434
  if unit is None:
2435
    if separator:
2436
      unit = "m"
2437
    else:
2438
      unit = "h"
2439

    
2440
  if format_override is None:
2441
    format_override = {}
2442

    
2443
  stats = dict.fromkeys(constants.RS_ALL, 0)
2444

    
2445
  def _RecordStatus(status):
2446
    if status in stats:
2447
      stats[status] += 1
2448

    
2449
  columns = []
2450
  for fdef in result.fields:
2451
    assert fdef.title and fdef.name
2452
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2453
    columns.append(TableColumn(fdef.title,
2454
                               _QueryColumnFormatter(fn, _RecordStatus),
2455
                               align_right))
2456

    
2457
  table = FormatTable(result.data, columns, header, separator)
2458

    
2459
  # Collect statistics
2460
  assert len(stats) == len(constants.RS_ALL)
2461
  assert compat.all(count >= 0 for count in stats.values())
2462

    
2463
  # Determine overall status. If there was no data, unknown fields must be
2464
  # detected via the field definitions.
2465
  if (stats[constants.RS_UNKNOWN] or
2466
      (not result.data and _GetUnknownFields(result.fields))):
2467
    status = QR_UNKNOWN
2468
  elif compat.any(count > 0 for key, count in stats.items()
2469
                  if key != constants.RS_NORMAL):
2470
    status = QR_INCOMPLETE
2471
  else:
2472
    status = QR_NORMAL
2473

    
2474
  return (status, table)
2475

    
2476

    
2477
def _GetUnknownFields(fdefs):
2478
  """Returns list of unknown fields included in C{fdefs}.
2479

2480
  @type fdefs: list of L{objects.QueryFieldDefinition}
2481

2482
  """
2483
  return [fdef for fdef in fdefs
2484
          if fdef.kind == constants.QFT_UNKNOWN]
2485

    
2486

    
2487
def _WarnUnknownFields(fdefs):
2488
  """Prints a warning to stderr if a query included unknown fields.
2489

2490
  @type fdefs: list of L{objects.QueryFieldDefinition}
2491

2492
  """
2493
  unknown = _GetUnknownFields(fdefs)
2494
  if unknown:
2495
    ToStderr("Warning: Queried for unknown fields %s",
2496
             utils.CommaJoin(fdef.name for fdef in unknown))
2497
    return True
2498

    
2499
  return False
2500

    
2501

    
2502
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2503
                format_override=None):
2504
  """Generic implementation for listing all items of a resource.
2505

2506
  @param resource: One of L{constants.QR_OP_LUXI}
2507
  @type fields: list of strings
2508
  @param fields: List of fields to query for
2509
  @type names: list of strings
2510
  @param names: Names of items to query for
2511
  @type unit: string or None
2512
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2513
    None for automatic choice (human-readable for non-separator usage,
2514
    otherwise megabytes); this is a one-letter string
2515
  @type separator: string or None
2516
  @param separator: String used to separate fields
2517
  @type header: bool
2518
  @param header: Whether to show header row
2519
  @type format_override: dict
2520
  @param format_override: Dictionary for overriding field formatting functions,
2521
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2522

2523
  """
2524
  if cl is None:
2525
    cl = GetClient()
2526

    
2527
  if not names:
2528
    names = None
2529

    
2530
  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
2531

    
2532
  found_unknown = _WarnUnknownFields(response.fields)
2533

    
2534
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2535
                                     header=header,
2536
                                     format_override=format_override)
2537

    
2538
  for line in data:
2539
    ToStdout(line)
2540

    
2541
  assert ((found_unknown and status == QR_UNKNOWN) or
2542
          (not found_unknown and status != QR_UNKNOWN))
2543

    
2544
  if status == QR_UNKNOWN:
2545
    return constants.EXIT_UNKNOWN_FIELD
2546

    
2547
  # TODO: Should the list command fail if not all data could be collected?
2548
  return constants.EXIT_SUCCESS
2549

    
2550

    
2551
def GenericListFields(resource, fields, separator, header, cl=None):
2552
  """Generic implementation for listing fields for a resource.
2553

2554
  @param resource: One of L{constants.QR_OP_LUXI}
2555
  @type fields: list of strings
2556
  @param fields: List of fields to query for
2557
  @type separator: string or None
2558
  @param separator: String used to separate fields
2559
  @type header: bool
2560
  @param header: Whether to show header row
2561

2562
  """
2563
  if cl is None:
2564
    cl = GetClient()
2565

    
2566
  if not fields:
2567
    fields = None
2568

    
2569
  response = cl.QueryFields(resource, fields)
2570

    
2571
  found_unknown = _WarnUnknownFields(response.fields)
2572

    
2573
  columns = [
2574
    TableColumn("Name", str, False),
2575
    TableColumn("Title", str, False),
2576
    # TODO: Add field description to master daemon
2577
    ]
2578

    
2579
  rows = [[fdef.name, fdef.title] for fdef in response.fields]
2580

    
2581
  for line in FormatTable(rows, columns, header, separator):
2582
    ToStdout(line)
2583

    
2584
  if found_unknown:
2585
    return constants.EXIT_UNKNOWN_FIELD
2586

    
2587
  return constants.EXIT_SUCCESS
2588

    
2589

    
2590
class TableColumn:
2591
  """Describes a column for L{FormatTable}.
2592

2593
  """
2594
  def __init__(self, title, fn, align_right):
2595
    """Initializes this class.
2596

2597
    @type title: string
2598
    @param title: Column title
2599
    @type fn: callable
2600
    @param fn: Formatting function
2601
    @type align_right: bool
2602
    @param align_right: Whether to align values on the right-hand side
2603

2604
    """
2605
    self.title = title
2606
    self.format = fn
2607
    self.align_right = align_right
2608

    
2609

    
2610
def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)
2620
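# Illustrative behaviour (not part of the original module):
#
#   _GetColFormatString(10, False)  # -> "%-10s" (left-aligned)
#   _GetColFormatString(10, True)   # -> "%10s"  (right-aligned)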

    
2621

    
2622
def FormatTable(rows, columns, header, separator):
2623
  """Formats data as a table.
2624

2625
  @type rows: list of lists
2626
  @param rows: Row data, one list per row
2627
  @type columns: list of L{TableColumn}
2628
  @param columns: Column descriptions
2629
  @type header: bool
2630
  @param header: Whether to show header row
2631
  @type separator: string or None
2632
  @param separator: String used to separate columns
2633

2634
  """
2635
  if header:
2636
    data = [[col.title for col in columns]]
2637
    colwidth = [len(col.title) for col in columns]
2638
  else:
2639
    data = []
2640
    colwidth = [0 for _ in columns]
2641

    
2642
  # Format row data
2643
  for row in rows:
2644
    assert len(row) == len(columns)
2645

    
2646
    formatted = [col.format(value) for value, col in zip(row, columns)]
2647

    
2648
    if separator is None:
2649
      # Update column widths
2650
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2651
        # Modifying a list's items while iterating is fine
2652
        colwidth[idx] = max(oldwidth, len(value))
2653

    
2654
    data.append(formatted)
2655

    
2656
  if separator is not None:
2657
    # Return early if a separator is used
2658
    return [separator.join(row) for row in data]
2659

    
2660
  if columns and not columns[-1].align_right:
2661
    # Avoid unnecessary spaces at end of line
2662
    colwidth[-1] = 0
2663

    
2664
  # Build format string
2665
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2666
                  for col, width in zip(columns, colwidth)])
2667

    
2668
  return [fmt % tuple(row) for row in data]
2669

    
2670

    
2671
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2685
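# Illustrative behaviour (not part of the original module); the exact output
# depends on the local timezone:
#
#   FormatTimestamp((1325376000, 123456))  # e.g. "2012-01-01 00:00:00.123456"
#   FormatTimestamp("garbage")             # -> "?"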

    
2686

    
2687
def ParseTimespec(value):
2688
  """Parse a time specification.
2689

2690
  The following suffixes will be recognized:
2691

2692
    - s: seconds
2693
    - m: minutes
2694
    - h: hours
2695
    - d: day
2696
    - w: weeks
2697

2698
  Without any suffix, the value will be taken to be in seconds.
2699

2700
  """
2701
  value = str(value)
2702
  if not value:
2703
    raise errors.OpPrereqError("Empty time specification passed")
2704
  suffix_map = {
2705
    's': 1,
2706
    'm': 60,
2707
    'h': 3600,
2708
    'd': 86400,
2709
    'w': 604800,
2710
    }
2711
  if value[-1] not in suffix_map:
2712
    try:
2713
      value = int(value)
2714
    except (TypeError, ValueError):
2715
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2716
  else:
2717
    multiplier = suffix_map[value[-1]]
2718
    value = value[:-1]
2719
    if not value: # no data left after stripping the suffix
2720
      raise errors.OpPrereqError("Invalid time specification (only"
2721
                                 " suffix passed)")
2722
    try:
2723
      value = int(value) * multiplier
2724
    except (TypeError, ValueError):
2725
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2726
  return value
2727
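# Illustrative behaviour (not part of the original module):
#
#   ParseTimespec("30")   # -> 30      (no suffix, taken as seconds)
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("1w")   # -> 604800
#   ParseTimespec("h")    # raises OpPrereqError (suffix without a value)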

    
2728

    
2729
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2730
                   filter_master=False):
2731
  """Returns the names of online nodes.
2732

2733
  This function will also log a warning on stderr with the names of
2734
  the offline nodes that are skipped.
2735

2736
  @param nodes: if not empty, use only this subset of nodes (minus the
2737
      offline ones)
2738
  @param cl: if not None, luxi client to use
2739
  @type nowarn: boolean
2740
  @param nowarn: by default, this function will output a note with the
2741
      offline nodes that are skipped; if this parameter is True the
2742
      note is not displayed
2743
  @type secondary_ips: boolean
2744
  @param secondary_ips: if True, return the secondary IPs instead of the
2745
      names, useful for doing network traffic over the replication interface
2746
      (if any)
2747
  @type filter_master: boolean
2748
  @param filter_master: if True, do not return the master node in the list
2749
      (useful in coordination with secondary_ips where we cannot check our
2750
      node name against the list)
2751

2752
  """
2753
  if cl is None:
2754
    cl = GetClient()
2755

    
2756
  if secondary_ips:
2757
    name_idx = 2
2758
  else:
2759
    name_idx = 0
2760

    
2761
  if filter_master:
2762
    master_node = cl.QueryConfigValues(["master_node"])[0]
2763
    filter_fn = lambda x: x != master_node
2764
  else:
2765
    filter_fn = lambda _: True
2766

    
2767
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2768
                         use_locking=False)
2769
  offline = [row[0] for row in result if row[1]]
2770
  if offline and not nowarn:
2771
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2772
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2773

    
2774

    
2775
def _ToStream(stream, txt, *args):
2776
  """Write a message to a stream, bypassing the logging system
2777

2778
  @type stream: file object
2779
  @param stream: the file to which we should write
2780
  @type txt: str
2781
  @param txt: the message
2782

2783
  """
2784
  if args:
2785
    args = tuple(args)
2786
    stream.write(txt % args)
2787
  else:
2788
    stream.write(txt)
2789
  stream.write('\n')
2790
  stream.flush()
2791

    
2792

    
2793
def ToStdout(txt, *args):
2794
  """Write a message to stdout only, bypassing the logging system
2795

2796
  This is just a wrapper over _ToStream.
2797

2798
  @type txt: str
2799
  @param txt: the message
2800

2801
  """
2802
  _ToStream(sys.stdout, txt, *args)
2803

    
2804

    
2805
def ToStderr(txt, *args):
2806
  """Write a message to stderr only, bypassing the logging system
2807

2808
  This is just a wrapper over _ToStream.
2809

2810
  @type txt: str
2811
  @param txt: the message
2812

2813
  """
2814
  _ToStream(sys.stderr, txt, *args)
2815

    
2816

    
2817
class JobExecutor(object):
2818
  """Class which manages the submission and execution of multiple jobs.
2819

2820
  Note that instances of this class should not be reused between
2821
  GetResults() calls.
2822

2823
  """
2824
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2825
    self.queue = []
2826
    if cl is None:
2827
      cl = GetClient()
2828
    self.cl = cl
2829
    self.verbose = verbose
2830
    self.jobs = []
2831
    self.opts = opts
2832
    self.feedback_fn = feedback_fn
2833

    
2834
  def QueueJob(self, name, *ops):
2835
    """Record a job for later submit.
2836

2837
    @type name: string
2838
    @param name: a description of the job, will be used in WaitJobSet
2839
    """
2840
    SetGenericOpcodeOpts(ops, self.opts)
2841
    self.queue.append((name, ops))
2842

    
2843
  def SubmitPending(self, each=False):
2844
    """Submit all pending jobs.
2845

2846
    """
2847
    if each:
2848
      results = []
2849
      for row in self.queue:
2850
        # SubmitJob will remove the success status, but raise an exception if
2851
        # the submission fails, so we'll notice that anyway.
2852
        results.append([True, self.cl.SubmitJob(row[1])])
2853
    else:
2854
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2855
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2856
                                                            self.queue)):
2857
      self.jobs.append((idx, status, data, name))
2858

    
2859
  def _ChooseJob(self):
2860
    """Choose a non-waiting/queued job to poll next.
2861

2862
    """
2863
    assert self.jobs, "_ChooseJob called with empty job list"
2864

    
2865
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2866
    assert result
2867

    
2868
    for job_data, status in zip(self.jobs, result):
2869
      if (isinstance(status, list) and status and
2870
          status[0] in (constants.JOB_STATUS_QUEUED,
2871
                        constants.JOB_STATUS_WAITLOCK,
2872
                        constants.JOB_STATUS_CANCELING)):
2873
        # job is still present and waiting
2874
        continue
2875
      # good candidate found (either running job or lost job)
2876
      self.jobs.remove(job_data)
2877
      return job_data
2878

    
2879
    # no job found
2880
    return self.jobs.pop(0)
2881

    
2882
  def GetResults(self):
2883
    """Wait for and return the results of all jobs.
2884

2885
    @rtype: list
2886
    @return: list of tuples (success, job results), in the same order
2887
        as the submitted jobs; if a job has failed, instead of the result
2888
        there will be the error message
2889

2890
    """
2891
    if not self.jobs:
2892
      self.SubmitPending()
2893
    results = []
2894
    if self.verbose:
2895
      ok_jobs = [row[2] for row in self.jobs if row[1]]
2896
      if ok_jobs:
2897
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2898

    
2899
    # first, remove any non-submitted jobs
2900
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2901
    for idx, _, jid, name in failures:
2902
      ToStderr("Failed to submit job for %s: %s", name, jid)
2903
      results.append((idx, False, jid))
2904

    
2905
    while self.jobs:
2906
      (idx, _, jid, name) = self._ChooseJob()
2907
      ToStdout("Waiting for job %s for %s...", jid, name)
2908
      try:
2909
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2910
        success = True
2911
      except errors.JobLost, err:
2912
        _, job_result = FormatError(err)
2913
        ToStderr("Job %s for %s has been archived, cannot check its result",
2914
                 jid, name)
2915
        success = False
2916
      except (errors.GenericError, luxi.ProtocolError), err:
2917
        _, job_result = FormatError(err)
2918
        success = False
2919
        # the error message will always be shown, verbose or not
2920
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2921

    
2922
      results.append((idx, success, job_result))
2923

    
2924
    # sort based on the index, then drop it
2925
    results.sort()
2926
    results = [i[1:] for i in results]
2927

    
2928
    return results
2929

    
2930
  def WaitOrShow(self, wait):
2931
    """Wait for job results or only print the job IDs.
2932

2933
    @type wait: boolean
2934
    @param wait: whether to wait or not
2935

2936
    """
2937
    if wait:
2938
      return self.GetResults()
2939
    else:
2940
      if not self.jobs:
2941
        self.SubmitPending()
2942
      for _, status, result, name in self.jobs:
2943
        if status:
2944
          ToStdout("%s: %s", result, name)
2945
        else:
2946
          ToStderr("Failure for %s: %s", name, result)
2947
      return [row[1:3] for row in self.jobs]
2948

    
2949

    
2950
def FormatParameterDict(buf, param_dict, actual, level=1):
2951
  """Formats a parameter dictionary.
2952

2953
  @type buf: L{StringIO}
2954
  @param buf: the buffer into which to write
2955
  @type param_dict: dict
2956
  @param param_dict: the own parameters
2957
  @type actual: dict
2958
  @param actual: the current parameter set (including defaults)
2959
  @param level: Level of indent
2960

2961
  """
2962
  indent = "  " * level
2963
  for key in sorted(actual):
2964
    val = param_dict.get(key, "default (%s)" % actual[key])
2965
    buf.write("%s- %s: %s\n" % (indent, key, val))
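# Illustrative usage (not part of the original module); the parameter names
# are made up:
#
#   buf = StringIO()
#   FormatParameterDict(buf, {"mem": 512}, {"mem": 512, "vcpus": 1})
#   # buf.getvalue() == "  - mem: 512\n  - vcpus: default (1)\n"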