Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ 685d3b42

History | View | Annotate | Download (95.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
from cStringIO import StringIO
31

    
32
from ganeti import utils
33
from ganeti import errors
34
from ganeti import constants
35
from ganeti import opcodes
36
from ganeti import luxi
37
from ganeti import ssconf
38
from ganeti import rpc
39
from ganeti import ssh
40
from ganeti import compat
41
from ganeti import netutils
42
from ganeti import qlang
43

    
44
from optparse import (OptionParser, TitledHelpFormatter,
45
                      Option, OptionValueError)
46

    
47

    
48
__all__ = [
49
  # Command line options
50
  "ADD_UIDS_OPT",
51
  "ALLOCATABLE_OPT",
52
  "ALLOC_POLICY_OPT",
53
  "ALL_OPT",
54
  "AUTO_PROMOTE_OPT",
55
  "AUTO_REPLACE_OPT",
56
  "BACKEND_OPT",
57
  "BLK_OS_OPT",
58
  "CAPAB_MASTER_OPT",
59
  "CAPAB_VM_OPT",
60
  "CLEANUP_OPT",
61
  "CLUSTER_DOMAIN_SECRET_OPT",
62
  "CONFIRM_OPT",
63
  "CP_SIZE_OPT",
64
  "DEBUG_OPT",
65
  "DEBUG_SIMERR_OPT",
66
  "DISKIDX_OPT",
67
  "DISK_OPT",
68
  "DISK_TEMPLATE_OPT",
69
  "DRAINED_OPT",
70
  "DRY_RUN_OPT",
71
  "DRBD_HELPER_OPT",
72
  "EARLY_RELEASE_OPT",
73
  "ENABLED_HV_OPT",
74
  "ERROR_CODES_OPT",
75
  "FIELDS_OPT",
76
  "FILESTORE_DIR_OPT",
77
  "FILESTORE_DRIVER_OPT",
78
  "FORCE_OPT",
79
  "FORCE_VARIANT_OPT",
80
  "GLOBAL_FILEDIR_OPT",
81
  "HID_OS_OPT",
82
  "HVLIST_OPT",
83
  "HVOPTS_OPT",
84
  "HYPERVISOR_OPT",
85
  "IALLOCATOR_OPT",
86
  "DEFAULT_IALLOCATOR_OPT",
87
  "IDENTIFY_DEFAULTS_OPT",
88
  "IGNORE_CONSIST_OPT",
89
  "IGNORE_FAILURES_OPT",
90
  "IGNORE_OFFLINE_OPT",
91
  "IGNORE_REMOVE_FAILURES_OPT",
92
  "IGNORE_SECONDARIES_OPT",
93
  "IGNORE_SIZE_OPT",
94
  "INTERVAL_OPT",
95
  "MAC_PREFIX_OPT",
96
  "MAINTAIN_NODE_HEALTH_OPT",
97
  "MASTER_NETDEV_OPT",
98
  "MC_OPT",
99
  "MIGRATION_MODE_OPT",
100
  "NET_OPT",
101
  "NEW_CLUSTER_CERT_OPT",
102
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
103
  "NEW_CONFD_HMAC_KEY_OPT",
104
  "NEW_RAPI_CERT_OPT",
105
  "NEW_SECONDARY_OPT",
106
  "NIC_PARAMS_OPT",
107
  "NODE_LIST_OPT",
108
  "NODE_PLACEMENT_OPT",
109
  "NODEGROUP_OPT",
110
  "NODE_PARAMS_OPT",
111
  "NODE_POWERED_OPT",
112
  "NODRBD_STORAGE_OPT",
113
  "NOHDR_OPT",
114
  "NOIPCHECK_OPT",
115
  "NO_INSTALL_OPT",
116
  "NONAMECHECK_OPT",
117
  "NOLVM_STORAGE_OPT",
118
  "NOMODIFY_ETCHOSTS_OPT",
119
  "NOMODIFY_SSH_SETUP_OPT",
120
  "NONICS_OPT",
121
  "NONLIVE_OPT",
122
  "NONPLUS1_OPT",
123
  "NOSHUTDOWN_OPT",
124
  "NOSTART_OPT",
125
  "NOSSH_KEYCHECK_OPT",
126
  "NOVOTING_OPT",
127
  "NWSYNC_OPT",
128
  "ON_PRIMARY_OPT",
129
  "ON_SECONDARY_OPT",
130
  "OFFLINE_OPT",
131
  "OSPARAMS_OPT",
132
  "OS_OPT",
133
  "OS_SIZE_OPT",
134
  "PREALLOC_WIPE_DISKS_OPT",
135
  "PRIMARY_IP_VERSION_OPT",
136
  "PRIORITY_OPT",
137
  "RAPI_CERT_OPT",
138
  "READD_OPT",
139
  "REBOOT_TYPE_OPT",
140
  "REMOVE_INSTANCE_OPT",
141
  "REMOVE_UIDS_OPT",
142
  "RESERVED_LVS_OPT",
143
  "ROMAN_OPT",
144
  "SECONDARY_IP_OPT",
145
  "SELECT_OS_OPT",
146
  "SEP_OPT",
147
  "SHOWCMD_OPT",
148
  "SHUTDOWN_TIMEOUT_OPT",
149
  "SINGLE_NODE_OPT",
150
  "SRC_DIR_OPT",
151
  "SRC_NODE_OPT",
152
  "SUBMIT_OPT",
153
  "STATIC_OPT",
154
  "SYNC_OPT",
155
  "TAG_SRC_OPT",
156
  "TIMEOUT_OPT",
157
  "UIDPOOL_OPT",
158
  "USEUNITS_OPT",
159
  "USE_REPL_NET_OPT",
160
  "VERBOSE_OPT",
161
  "VG_NAME_OPT",
162
  "YES_DOIT_OPT",
163
  # Generic functions for CLI programs
164
  "ConfirmOperation",
165
  "GenericMain",
166
  "GenericInstanceCreate",
167
  "GenericList",
168
  "GenericListFields",
169
  "GetClient",
170
  "GetOnlineNodes",
171
  "JobExecutor",
172
  "JobSubmittedException",
173
  "ParseTimespec",
174
  "RunWhileClusterStopped",
175
  "SubmitOpCode",
176
  "SubmitOrSend",
177
  "UsesRPC",
178
  # Formatting functions
179
  "ToStderr", "ToStdout",
180
  "FormatError",
181
  "FormatQueryResult",
182
  "FormatParameterDict",
183
  "GenerateTable",
184
  "AskUser",
185
  "FormatTimestamp",
186
  "FormatLogMessage",
187
  # Tags functions
188
  "ListTags",
189
  "AddTags",
190
  "RemoveTags",
191
  # command line options support infrastructure
192
  "ARGS_MANY_INSTANCES",
193
  "ARGS_MANY_NODES",
194
  "ARGS_MANY_GROUPS",
195
  "ARGS_NONE",
196
  "ARGS_ONE_INSTANCE",
197
  "ARGS_ONE_NODE",
198
  "ARGS_ONE_GROUP",
199
  "ARGS_ONE_OS",
200
  "ArgChoice",
201
  "ArgCommand",
202
  "ArgFile",
203
  "ArgGroup",
204
  "ArgHost",
205
  "ArgInstance",
206
  "ArgJobId",
207
  "ArgNode",
208
  "ArgOs",
209
  "ArgSuggest",
210
  "ArgUnknown",
211
  "OPT_COMPL_INST_ADD_NODES",
212
  "OPT_COMPL_MANY_NODES",
213
  "OPT_COMPL_ONE_IALLOCATOR",
214
  "OPT_COMPL_ONE_INSTANCE",
215
  "OPT_COMPL_ONE_NODE",
216
  "OPT_COMPL_ONE_NODEGROUP",
217
  "OPT_COMPL_ONE_OS",
218
  "cli_option",
219
  "SplitNodeOption",
220
  "CalculateOSNames",
221
  "ParseFields",
222
  "COMMON_CREATE_OPTS",
223
  ]
224

    
225
NO_PREFIX = "no_"
226
UN_PREFIX = "-"
227

    
228
#: Priorities (sorted)
229
_PRIORITY_NAMES = [
230
  ("low", constants.OP_PRIO_LOW),
231
  ("normal", constants.OP_PRIO_NORMAL),
232
  ("high", constants.OP_PRIO_HIGH),
233
  ]
234

    
235
#: Priority dictionary for easier lookup
236
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
237
# we migrate to Python 2.6
238
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
239

    
240
# Query result status for clients
241
(QR_NORMAL,
242
 QR_UNKNOWN,
243
 QR_INCOMPLETE) = range(3)
244

    
245

    
246
class _Argument:
247
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
248
    self.min = min
249
    self.max = max
250

    
251
  def __repr__(self):
252
    return ("<%s min=%s max=%s>" %
253
            (self.__class__.__name__, self.min, self.max))
254

    
255

    
256
class ArgSuggest(_Argument):
257
  """Suggesting argument.
258

259
  Value can be any of the ones passed to the constructor.
260

261
  """
262
  # pylint: disable-msg=W0622
263
  def __init__(self, min=0, max=None, choices=None):
264
    _Argument.__init__(self, min=min, max=max)
265
    self.choices = choices
266

    
267
  def __repr__(self):
268
    return ("<%s min=%s max=%s choices=%r>" %
269
            (self.__class__.__name__, self.min, self.max, self.choices))
270

    
271

    
272
class ArgChoice(ArgSuggest):
273
  """Choice argument.
274

275
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
276
  but value must be one of the choices.
277

278
  """
279

    
280

    
281
class ArgUnknown(_Argument):
282
  """Unknown argument to program (e.g. determined at runtime).
283

284
  """
285

    
286

    
287
class ArgInstance(_Argument):
288
  """Instances argument.
289

290
  """
291

    
292

    
293
class ArgNode(_Argument):
294
  """Node argument.
295

296
  """
297

    
298

    
299
class ArgGroup(_Argument):
300
  """Node group argument.
301

302
  """
303

    
304

    
305
class ArgJobId(_Argument):
306
  """Job ID argument.
307

308
  """
309

    
310

    
311
class ArgFile(_Argument):
312
  """File path argument.
313

314
  """
315

    
316

    
317
class ArgCommand(_Argument):
318
  """Command argument.
319

320
  """
321

    
322

    
323
class ArgHost(_Argument):
324
  """Host argument.
325

326
  """
327

    
328

    
329
class ArgOs(_Argument):
330
  """OS argument.
331

332
  """
333

    
334

    
335
ARGS_NONE = []
336
ARGS_MANY_INSTANCES = [ArgInstance()]
337
ARGS_MANY_NODES = [ArgNode()]
338
ARGS_MANY_GROUPS = [ArgGroup()]
339
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
340
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
341
ARGS_ONE_GROUP = [ArgInstance(min=1, max=1)]
342
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
343

    
344

    
345
def _ExtractTagsObject(opts, args):
346
  """Extract the tag type object.
347

348
  Note that this function will modify its args parameter.
349

350
  """
351
  if not hasattr(opts, "tag_type"):
352
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
353
  kind = opts.tag_type
354
  if kind == constants.TAG_CLUSTER:
355
    retval = kind, kind
356
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
357
    if not args:
358
      raise errors.OpPrereqError("no arguments passed to the command")
359
    name = args.pop(0)
360
    retval = kind, name
361
  else:
362
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
363
  return retval
364

    
365

    
366
def _ExtendTags(opts, args):
367
  """Extend the args if a source file has been given.
368

369
  This function will extend the tags with the contents of the file
370
  passed in the 'tags_source' attribute of the opts parameter. A file
371
  named '-' will be replaced by stdin.
372

373
  """
374
  fname = opts.tags_source
375
  if fname is None:
376
    return
377
  if fname == "-":
378
    new_fh = sys.stdin
379
  else:
380
    new_fh = open(fname, "r")
381
  new_data = []
382
  try:
383
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
384
    # because of python bug 1633941
385
    while True:
386
      line = new_fh.readline()
387
      if not line:
388
        break
389
      new_data.append(line.strip())
390
  finally:
391
    new_fh.close()
392
  args.extend(new_data)
393

    
394

    
395
def ListTags(opts, args):
396
  """List the tags on a given object.
397

398
  This is a generic implementation that knows how to deal with all
399
  three cases of tag objects (cluster, node, instance). The opts
400
  argument is expected to contain a tag_type field denoting what
401
  object type we work on.
402

403
  """
404
  kind, name = _ExtractTagsObject(opts, args)
405
  cl = GetClient()
406
  result = cl.QueryTags(kind, name)
407
  result = list(result)
408
  result.sort()
409
  for tag in result:
410
    ToStdout(tag)
411

    
412

    
413
def AddTags(opts, args):
414
  """Add tags on a given object.
415

416
  This is a generic implementation that knows how to deal with all
417
  three cases of tag objects (cluster, node, instance). The opts
418
  argument is expected to contain a tag_type field denoting what
419
  object type we work on.
420

421
  """
422
  kind, name = _ExtractTagsObject(opts, args)
423
  _ExtendTags(opts, args)
424
  if not args:
425
    raise errors.OpPrereqError("No tags to be added")
426
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
427
  SubmitOpCode(op, opts=opts)
428

    
429

    
430
def RemoveTags(opts, args):
431
  """Remove tags from a given object.
432

433
  This is a generic implementation that knows how to deal with all
434
  three cases of tag objects (cluster, node, instance). The opts
435
  argument is expected to contain a tag_type field denoting what
436
  object type we work on.
437

438
  """
439
  kind, name = _ExtractTagsObject(opts, args)
440
  _ExtendTags(opts, args)
441
  if not args:
442
    raise errors.OpPrereqError("No tags to be removed")
443
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
444
  SubmitOpCode(op, opts=opts)
445

    
446

    
447
def check_unit(option, opt, value): # pylint: disable-msg=W0613
448
  """OptParsers custom converter for units.
449

450
  """
451
  try:
452
    return utils.ParseUnit(value)
453
  except errors.UnitParseError, err:
454
    raise OptionValueError("option %s: %s" % (opt, err))
455

    
456

    
457
def _SplitKeyVal(opt, data):
458
  """Convert a KeyVal string into a dict.
459

460
  This function will convert a key=val[,...] string into a dict. Empty
461
  values will be converted specially: keys which have the prefix 'no_'
462
  will have the value=False and the prefix stripped, the others will
463
  have value=True.
464

465
  @type opt: string
466
  @param opt: a string holding the option name for which we process the
467
      data, used in building error messages
468
  @type data: string
469
  @param data: a string of the format key=val,key=val,...
470
  @rtype: dict
471
  @return: {key=val, key=val}
472
  @raises errors.ParameterError: if there are duplicate keys
473

474
  """
475
  kv_dict = {}
476
  if data:
477
    for elem in utils.UnescapeAndSplit(data, sep=","):
478
      if "=" in elem:
479
        key, val = elem.split("=", 1)
480
      else:
481
        if elem.startswith(NO_PREFIX):
482
          key, val = elem[len(NO_PREFIX):], False
483
        elif elem.startswith(UN_PREFIX):
484
          key, val = elem[len(UN_PREFIX):], None
485
        else:
486
          key, val = elem, True
487
      if key in kv_dict:
488
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
489
                                    (key, opt))
490
      kv_dict[key] = val
491
  return kv_dict
492

    
493

    
494
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
495
  """Custom parser for ident:key=val,key=val options.
496

497
  This will store the parsed values as a tuple (ident, {key: val}). As such,
498
  multiple uses of this option via action=append is possible.
499

500
  """
501
  if ":" not in value:
502
    ident, rest = value, ''
503
  else:
504
    ident, rest = value.split(":", 1)
505

    
506
  if ident.startswith(NO_PREFIX):
507
    if rest:
508
      msg = "Cannot pass options when removing parameter groups: %s" % value
509
      raise errors.ParameterError(msg)
510
    retval = (ident[len(NO_PREFIX):], False)
511
  elif ident.startswith(UN_PREFIX):
512
    if rest:
513
      msg = "Cannot pass options when removing parameter groups: %s" % value
514
      raise errors.ParameterError(msg)
515
    retval = (ident[len(UN_PREFIX):], None)
516
  else:
517
    kv_dict = _SplitKeyVal(opt, rest)
518
    retval = (ident, kv_dict)
519
  return retval
520

    
521

    
522
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
523
  """Custom parser class for key=val,key=val options.
524

525
  This will store the parsed values as a dict {key: val}.
526

527
  """
528
  return _SplitKeyVal(opt, value)
529

    
530

    
531
def check_bool(option, opt, value): # pylint: disable-msg=W0613
532
  """Custom parser for yes/no options.
533

534
  This will store the parsed value as either True or False.
535

536
  """
537
  value = value.lower()
538
  if value == constants.VALUE_FALSE or value == "no":
539
    return False
540
  elif value == constants.VALUE_TRUE or value == "yes":
541
    return True
542
  else:
543
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
544

    
545

    
546
# completion_suggestion is normally a list. Using numeric values not evaluating
547
# to False for dynamic completion.
548
(OPT_COMPL_MANY_NODES,
549
 OPT_COMPL_ONE_NODE,
550
 OPT_COMPL_ONE_INSTANCE,
551
 OPT_COMPL_ONE_OS,
552
 OPT_COMPL_ONE_IALLOCATOR,
553
 OPT_COMPL_INST_ADD_NODES,
554
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
555

    
556
OPT_COMPL_ALL = frozenset([
557
  OPT_COMPL_MANY_NODES,
558
  OPT_COMPL_ONE_NODE,
559
  OPT_COMPL_ONE_INSTANCE,
560
  OPT_COMPL_ONE_OS,
561
  OPT_COMPL_ONE_IALLOCATOR,
562
  OPT_COMPL_INST_ADD_NODES,
563
  OPT_COMPL_ONE_NODEGROUP,
564
  ])
565

    
566

    
567
class CliOption(Option):
568
  """Custom option class for optparse.
569

570
  """
571
  ATTRS = Option.ATTRS + [
572
    "completion_suggest",
573
    ]
574
  TYPES = Option.TYPES + (
575
    "identkeyval",
576
    "keyval",
577
    "unit",
578
    "bool",
579
    )
580
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
581
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
582
  TYPE_CHECKER["keyval"] = check_key_val
583
  TYPE_CHECKER["unit"] = check_unit
584
  TYPE_CHECKER["bool"] = check_bool
585

    
586

    
587
# optparse.py sets make_option, so we do it for our own option class, too
588
cli_option = CliOption
589

    
590

    
591
_YORNO = "yes|no"
592

    
593
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
594
                       help="Increase debugging level")
595

    
596
NOHDR_OPT = cli_option("--no-headers", default=False,
597
                       action="store_true", dest="no_headers",
598
                       help="Don't display column headers")
599

    
600
SEP_OPT = cli_option("--separator", default=None,
601
                     action="store", dest="separator",
602
                     help=("Separator between output fields"
603
                           " (defaults to one space)"))
604

    
605
USEUNITS_OPT = cli_option("--units", default=None,
606
                          dest="units", choices=('h', 'm', 'g', 't'),
607
                          help="Specify units for output (one of h/m/g/t)")
608

    
609
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
610
                        type="string", metavar="FIELDS",
611
                        help="Comma separated list of output fields")
612

    
613
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
614
                       default=False, help="Force the operation")
615

    
616
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
617
                         default=False, help="Do not require confirmation")
618

    
619
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
620
                                  action="store_true", default=False,
621
                                  help=("Ignore offline nodes and do as much"
622
                                        " as possible"))
623

    
624
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
625
                         default=None, help="File with tag names")
626

    
627
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
628
                        default=False, action="store_true",
629
                        help=("Submit the job and return the job ID, but"
630
                              " don't wait for the job to finish"))
631

    
632
SYNC_OPT = cli_option("--sync", dest="do_locking",
633
                      default=False, action="store_true",
634
                      help=("Grab locks while doing the queries"
635
                            " in order to ensure more consistent results"))
636

    
637
DRY_RUN_OPT = cli_option("--dry-run", default=False,
638
                         action="store_true",
639
                         help=("Do not execute the operation, just run the"
640
                               " check steps and verify it it could be"
641
                               " executed"))
642

    
643
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
644
                         action="store_true",
645
                         help="Increase the verbosity of the operation")
646

    
647
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
648
                              action="store_true", dest="simulate_errors",
649
                              help="Debugging option that makes the operation"
650
                              " treat most runtime checks as failed")
651

    
652
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
653
                        default=True, action="store_false",
654
                        help="Don't wait for sync (DANGEROUS!)")
655

    
656
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
657
                               help="Custom disk setup (diskless, file,"
658
                               " plain or drbd)",
659
                               default=None, metavar="TEMPL",
660
                               choices=list(constants.DISK_TEMPLATES))
661

    
662
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
663
                        help="Do not create any network cards for"
664
                        " the instance")
665

    
666
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
667
                               help="Relative path under default cluster-wide"
668
                               " file storage dir to store file-based disks",
669
                               default=None, metavar="<DIR>")
670

    
671
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
672
                                  help="Driver to use for image files",
673
                                  default="loop", metavar="<DRIVER>",
674
                                  choices=list(constants.FILE_DRIVER))
675

    
676
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
677
                            help="Select nodes for the instance automatically"
678
                            " using the <NAME> iallocator plugin",
679
                            default=None, type="string",
680
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
681

    
682
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
683
                            metavar="<NAME>",
684
                            help="Set the default instance allocator plugin",
685
                            default=None, type="string",
686
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
687

    
688
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
689
                    metavar="<os>",
690
                    completion_suggest=OPT_COMPL_ONE_OS)
691

    
692
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
693
                         type="keyval", default={},
694
                         help="OS parameters")
695

    
696
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
697
                               action="store_true", default=False,
698
                               help="Force an unknown variant")
699

    
700
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
701
                            action="store_true", default=False,
702
                            help="Do not install the OS (will"
703
                            " enable no-start)")
704

    
705
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
706
                         type="keyval", default={},
707
                         help="Backend parameters")
708

    
709
HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
710
                         default={}, dest="hvparams",
711
                         help="Hypervisor parameters")
712

    
713
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
714
                            help="Hypervisor and hypervisor options, in the"
715
                            " format hypervisor:option=value,option=value,...",
716
                            default=None, type="identkeyval")
717

    
718
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
719
                        help="Hypervisor and hypervisor options, in the"
720
                        " format hypervisor:option=value,option=value,...",
721
                        default=[], action="append", type="identkeyval")
722

    
723
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
724
                           action="store_false",
725
                           help="Don't check that the instance's IP"
726
                           " is alive")
727

    
728
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
729
                             default=True, action="store_false",
730
                             help="Don't check that the instance's name"
731
                             " is resolvable")
732

    
733
NET_OPT = cli_option("--net",
734
                     help="NIC parameters", default=[],
735
                     dest="nics", action="append", type="identkeyval")
736

    
737
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
738
                      dest="disks", action="append", type="identkeyval")
739

    
740
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
741
                         help="Comma-separated list of disks"
742
                         " indices to act on (e.g. 0,2) (optional,"
743
                         " defaults to all disks)")
744

    
745
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
746
                         help="Enforces a single-disk configuration using the"
747
                         " given disk size, in MiB unless a suffix is used",
748
                         default=None, type="unit", metavar="<size>")
749

    
750
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
751
                                dest="ignore_consistency",
752
                                action="store_true", default=False,
753
                                help="Ignore the consistency of the disks on"
754
                                " the secondary")
755

    
756
NONLIVE_OPT = cli_option("--non-live", dest="live",
757
                         default=True, action="store_false",
758
                         help="Do a non-live migration (this usually means"
759
                         " freeze the instance, save the state, transfer and"
760
                         " only then resume running on the secondary node)")
761

    
762
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
763
                                default=None,
764
                                choices=list(constants.HT_MIGRATION_MODES),
765
                                help="Override default migration mode (choose"
766
                                " either live or non-live")
767

    
768
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
769
                                help="Target node and optional secondary node",
770
                                metavar="<pnode>[:<snode>]",
771
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
772

    
773
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
774
                           action="append", metavar="<node>",
775
                           help="Use only this node (can be used multiple"
776
                           " times, if not given defaults to all nodes)",
777
                           completion_suggest=OPT_COMPL_ONE_NODE)
778

    
779
NODEGROUP_OPT = cli_option("-g", "--node-group",
780
                           dest="nodegroup",
781
                           help="Node group (name or uuid)",
782
                           metavar="<nodegroup>",
783
                           default=None, type="string",
784
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
785

    
786
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
787
                             metavar="<node>",
788
                             completion_suggest=OPT_COMPL_ONE_NODE)
789

    
790
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
791
                         action="store_false",
792
                         help="Don't start the instance after creation")
793

    
794
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
795
                         action="store_true", default=False,
796
                         help="Show command instead of executing it")
797

    
798
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
799
                         default=False, action="store_true",
800
                         help="Instead of performing the migration, try to"
801
                         " recover from a failed cleanup. This is safe"
802
                         " to run even if the instance is healthy, but it"
803
                         " will create extra replication traffic and "
804
                         " disrupt briefly the replication (like during the"
805
                         " migration")
806

    
807
STATIC_OPT = cli_option("-s", "--static", dest="static",
808
                        action="store_true", default=False,
809
                        help="Only show configuration data, not runtime data")
810

    
811
ALL_OPT = cli_option("--all", dest="show_all",
812
                     default=False, action="store_true",
813
                     help="Show info on all instances on the cluster."
814
                     " This can take a long time to run, use wisely")
815

    
816
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
817
                           action="store_true", default=False,
818
                           help="Interactive OS reinstall, lists available"
819
                           " OS templates for selection")
820

    
821
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
822
                                 action="store_true", default=False,
823
                                 help="Remove the instance from the cluster"
824
                                 " configuration even if there are failures"
825
                                 " during the removal process")
826

    
827
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
828
                                        dest="ignore_remove_failures",
829
                                        action="store_true", default=False,
830
                                        help="Remove the instance from the"
831
                                        " cluster configuration even if there"
832
                                        " are failures during the removal"
833
                                        " process")
834

    
835
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
836
                                 action="store_true", default=False,
837
                                 help="Remove the instance from the cluster")
838

    
839
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
840
                               help="Specifies the new secondary node",
841
                               metavar="NODE", default=None,
842
                               completion_suggest=OPT_COMPL_ONE_NODE)
843

    
844
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
845
                            default=False, action="store_true",
846
                            help="Replace the disk(s) on the primary"
847
                            " node (only for the drbd template)")
848

    
849
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
850
                              default=False, action="store_true",
851
                              help="Replace the disk(s) on the secondary"
852
                              " node (only for the drbd template)")
853

    
854
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
855
                              default=False, action="store_true",
856
                              help="Lock all nodes and auto-promote as needed"
857
                              " to MC status")
858

    
859
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
860
                              default=False, action="store_true",
861
                              help="Automatically replace faulty disks"
862
                              " (only for the drbd template)")
863

    
864
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
865
                             default=False, action="store_true",
866
                             help="Ignore current recorded size"
867
                             " (useful for forcing activation when"
868
                             " the recorded size is wrong)")
869

    
870
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
871
                          metavar="<node>",
872
                          completion_suggest=OPT_COMPL_ONE_NODE)
873

    
874
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
875
                         metavar="<dir>")
876

    
877
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
878
                              help="Specify the secondary ip for the node",
879
                              metavar="ADDRESS", default=None)
880

    
881
READD_OPT = cli_option("--readd", dest="readd",
882
                       default=False, action="store_true",
883
                       help="Readd old node after replacing it")
884

    
885
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
886
                                default=True, action="store_false",
887
                                help="Disable SSH key fingerprint checking")
888

    
889
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
890
                    type="bool", default=None, metavar=_YORNO,
891
                    help="Set the master_candidate flag on the node")
892

    
893
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
894
                         type="bool", default=None,
895
                         help=("Set the offline flag on the node"
896
                               " (cluster does not communicate with offline"
897
                               " nodes)"))
898

    
899
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
900
                         type="bool", default=None,
901
                         help=("Set the drained flag on the node"
902
                               " (excluded from allocation operations)"))
903

    
904
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
905
                    type="bool", default=None, metavar=_YORNO,
906
                    help="Set the master_capable flag on the node")
907

    
908
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
909
                    type="bool", default=None, metavar=_YORNO,
910
                    help="Set the vm_capable flag on the node")
911

    
912
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
913
                             type="bool", default=None, metavar=_YORNO,
914
                             help="Set the allocatable flag on a volume")
915

    
916
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
917
                               help="Disable support for lvm based instances"
918
                               " (cluster-wide)",
919
                               action="store_false", default=True)
920

    
921
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
922
                            dest="enabled_hypervisors",
923
                            help="Comma-separated list of hypervisors",
924
                            type="string", default=None)
925

    
926
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
927
                            type="keyval", default={},
928
                            help="NIC parameters")
929

    
930
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
931
                         dest="candidate_pool_size", type="int",
932
                         help="Set the candidate pool size")
933

    
934
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
935
                         help=("Enables LVM and specifies the volume group"
936
                               " name (cluster-wide) for disk allocation"
937
                               " [%s]" % constants.DEFAULT_VG),
938
                         metavar="VG", default=None)
939

    
940
YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
941
                          help="Destroy cluster", action="store_true")
942

    
943
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
944
                          help="Skip node agreement check (dangerous)",
945
                          action="store_true", default=False)
946

    
947
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
948
                            help="Specify the mac prefix for the instance IP"
949
                            " addresses, in the format XX:XX:XX",
950
                            metavar="PREFIX",
951
                            default=None)
952

    
953
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
954
                               help="Specify the node interface (cluster-wide)"
955
                               " on which the master IP address will be added"
956
                               " (cluster init default: %s)" %
957
                               constants.DEFAULT_BRIDGE,
958
                               metavar="NETDEV",
959
                               default=None)
960

    
961
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
962
                                help="Specify the default directory (cluster-"
963
                                "wide) for storing the file-based disks [%s]" %
964
                                constants.DEFAULT_FILE_STORAGE_DIR,
965
                                metavar="DIR",
966
                                default=constants.DEFAULT_FILE_STORAGE_DIR)
967

    
968
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
969
                                   help="Don't modify /etc/hosts",
970
                                   action="store_false", default=True)
971

    
972
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
973
                                    help="Don't initialize SSH keys",
974
                                    action="store_false", default=True)
975

    
976
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
977
                             help="Enable parseable error messages",
978
                             action="store_true", default=False)
979

    
980
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
981
                          help="Skip N+1 memory redundancy tests",
982
                          action="store_true", default=False)
983

    
984
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
985
                             help="Type of reboot: soft/hard/full",
986
                             default=constants.INSTANCE_REBOOT_HARD,
987
                             metavar="<REBOOT>",
988
                             choices=list(constants.REBOOT_TYPES))
989

    
990
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
991
                                    dest="ignore_secondaries",
992
                                    default=False, action="store_true",
993
                                    help="Ignore errors from secondaries")
994

    
995
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
996
                            action="store_false", default=True,
997
                            help="Don't shutdown the instance (unsafe)")
998

    
999
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1000
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1001
                         help="Maximum time to wait")
1002

    
1003
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1004
                         dest="shutdown_timeout", type="int",
1005
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1006
                         help="Maximum time to wait for instance shutdown")
1007

    
1008
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1009
                          default=None,
1010
                          help=("Number of seconds between repetions of the"
1011
                                " command"))
1012

    
1013
EARLY_RELEASE_OPT = cli_option("--early-release",
1014
                               dest="early_release", default=False,
1015
                               action="store_true",
1016
                               help="Release the locks on the secondary"
1017
                               " node(s) early")
1018

    
1019
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1020
                                  dest="new_cluster_cert",
1021
                                  default=False, action="store_true",
1022
                                  help="Generate a new cluster certificate")
1023

    
1024
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1025
                           default=None,
1026
                           help="File containing new RAPI certificate")
1027

    
1028
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1029
                               default=None, action="store_true",
1030
                               help=("Generate a new self-signed RAPI"
1031
                                     " certificate"))
1032

    
1033
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1034
                                    dest="new_confd_hmac_key",
1035
                                    default=False, action="store_true",
1036
                                    help=("Create a new HMAC key for %s" %
1037
                                          constants.CONFD))
1038

    
1039
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1040
                                       dest="cluster_domain_secret",
1041
                                       default=None,
1042
                                       help=("Load new new cluster domain"
1043
                                             " secret from file"))
1044

    
1045
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1046
                                           dest="new_cluster_domain_secret",
1047
                                           default=False, action="store_true",
1048
                                           help=("Create a new cluster domain"
1049
                                                 " secret"))
1050

    
1051
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1052
                              dest="use_replication_network",
1053
                              help="Whether to use the replication network"
1054
                              " for talking to the nodes",
1055
                              action="store_true", default=False)
1056

    
1057
MAINTAIN_NODE_HEALTH_OPT = \
1058
    cli_option("--maintain-node-health", dest="maintain_node_health",
1059
               metavar=_YORNO, default=None, type="bool",
1060
               help="Configure the cluster to automatically maintain node"
1061
               " health, by shutting down unknown instances, shutting down"
1062
               " unknown DRBD devices, etc.")
1063

    
1064
IDENTIFY_DEFAULTS_OPT = \
1065
    cli_option("--identify-defaults", dest="identify_defaults",
1066
               default=False, action="store_true",
1067
               help="Identify which saved instance parameters are equal to"
1068
               " the current cluster defaults and set them as such, instead"
1069
               " of marking them as overridden")
1070

    
1071
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1072
                         action="store", dest="uid_pool",
1073
                         help=("A list of user-ids or user-id"
1074
                               " ranges separated by commas"))
1075

    
1076
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1077
                          action="store", dest="add_uids",
1078
                          help=("A list of user-ids or user-id"
1079
                                " ranges separated by commas, to be"
1080
                                " added to the user-id pool"))
1081

    
1082
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1083
                             action="store", dest="remove_uids",
1084
                             help=("A list of user-ids or user-id"
1085
                                   " ranges separated by commas, to be"
1086
                                   " removed from the user-id pool"))
1087

    
1088
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1089
                             action="store", dest="reserved_lvs",
1090
                             help=("A comma-separated list of reserved"
1091
                                   " logical volumes names, that will be"
1092
                                   " ignored by cluster verify"))
1093

    
1094
ROMAN_OPT = cli_option("--roman",
1095
                       dest="roman_integers", default=False,
1096
                       action="store_true",
1097
                       help="Use roman numbers for positive integers")
1098

    
1099
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1100
                             action="store", default=None,
1101
                             help="Specifies usermode helper for DRBD")
1102

    
1103
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1104
                                action="store_false", default=True,
1105
                                help="Disable support for DRBD")
1106

    
1107
PRIMARY_IP_VERSION_OPT = \
1108
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1109
               action="store", dest="primary_ip_version",
1110
               metavar="%d|%d" % (constants.IP4_VERSION,
1111
                                  constants.IP6_VERSION),
1112
               help="Cluster-wide IP version for primary IP")
1113

    
1114
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1115
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1116
                          choices=_PRIONAME_TO_VALUE.keys(),
1117
                          help="Priority for opcode processing")
1118

    
1119
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1120
                        type="bool", default=None, metavar=_YORNO,
1121
                        help="Sets the hidden flag on the OS")
1122

    
1123
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1124
                        type="bool", default=None, metavar=_YORNO,
1125
                        help="Sets the blacklisted flag on the OS")
1126

    
1127
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1128
                                     type="bool", metavar=_YORNO,
1129
                                     dest="prealloc_wipe_disks",
1130
                                     help=("Wipe disks prior to instance"
1131
                                           " creation"))
1132

    
1133
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1134
                             type="keyval", default=None,
1135
                             help="Node parameters")
1136

    
1137
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1138
                              action="store", metavar="POLICY", default=None,
1139
                              help="Allocation policy for the node group")
1140

    
1141
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1142
                              type="bool", metavar=_YORNO,
1143
                              dest="node_powered",
1144
                              help="Specify if the SoR for node is powered")
1145

    
1146

    
1147
#: Options provided by all commands
1148
COMMON_OPTS = [DEBUG_OPT]
1149

    
1150
# common options for creating instances. add and import then add their own
1151
# specific ones.
1152
COMMON_CREATE_OPTS = [
1153
  BACKEND_OPT,
1154
  DISK_OPT,
1155
  DISK_TEMPLATE_OPT,
1156
  FILESTORE_DIR_OPT,
1157
  FILESTORE_DRIVER_OPT,
1158
  HYPERVISOR_OPT,
1159
  IALLOCATOR_OPT,
1160
  NET_OPT,
1161
  NODE_PLACEMENT_OPT,
1162
  NOIPCHECK_OPT,
1163
  NONAMECHECK_OPT,
1164
  NONICS_OPT,
1165
  NWSYNC_OPT,
1166
  OSPARAMS_OPT,
1167
  OS_SIZE_OPT,
1168
  SUBMIT_OPT,
1169
  DRY_RUN_OPT,
1170
  PRIORITY_OPT,
1171
  ]
1172

    
1173

    
1174
_RSTATUS_TO_TEXT = {
1175
  constants.RS_UNKNOWN: "(unknown)",
1176
  constants.RS_NODATA: "(nodata)",
1177
  constants.RS_UNAVAIL: "(unavail)",
1178
  constants.RS_OFFLINE: "(offline)",
1179
  }
1180

    
1181

    
1182
def _ParseArgs(argv, commands, aliases):
1183
  """Parser for the command line arguments.
1184

1185
  This function parses the arguments and returns the function which
1186
  must be executed together with its (modified) arguments.
1187

1188
  @param argv: the command line
1189
  @param commands: dictionary with special contents, see the design
1190
      doc for cmdline handling
1191
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
1192

1193
  """
1194
  if len(argv) == 0:
1195
    binary = "<command>"
1196
  else:
1197
    binary = argv[0].split("/")[-1]
1198

    
1199
  if len(argv) > 1 and argv[1] == "--version":
1200
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1201
             constants.RELEASE_VERSION)
1202
    # Quit right away. That way we don't have to care about this special
1203
    # argument. optparse.py does it the same.
1204
    sys.exit(0)
1205

    
1206
  if len(argv) < 2 or not (argv[1] in commands or
1207
                           argv[1] in aliases):
1208
    # let's do a nice thing
1209
    sortedcmds = commands.keys()
1210
    sortedcmds.sort()
1211

    
1212
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1213
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1214
    ToStdout("")
1215

    
1216
    # compute the max line length for cmd + usage
1217
    mlen = max([len(" %s" % cmd) for cmd in commands])
1218
    mlen = min(60, mlen) # should not get here...
1219

    
1220
    # and format a nice command list
1221
    ToStdout("Commands:")
1222
    for cmd in sortedcmds:
1223
      cmdstr = " %s" % (cmd,)
1224
      help_text = commands[cmd][4]
1225
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1226
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1227
      for line in help_lines:
1228
        ToStdout("%-*s   %s", mlen, "", line)
1229

    
1230
    ToStdout("")
1231

    
1232
    return None, None, None
1233

    
1234
  # get command, unalias it, and look it up in commands
1235
  cmd = argv.pop(1)
1236
  if cmd in aliases:
1237
    if cmd in commands:
1238
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
1239
                                   " command" % cmd)
1240

    
1241
    if aliases[cmd] not in commands:
1242
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1243
                                   " command '%s'" % (cmd, aliases[cmd]))
1244

    
1245
    cmd = aliases[cmd]
1246

    
1247
  func, args_def, parser_opts, usage, description = commands[cmd]
1248
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1249
                        description=description,
1250
                        formatter=TitledHelpFormatter(),
1251
                        usage="%%prog %s %s" % (cmd, usage))
1252
  parser.disable_interspersed_args()
1253
  options, args = parser.parse_args()
1254

    
1255
  if not _CheckArguments(cmd, args_def, args):
1256
    return None, None, None
1257

    
1258
  return func, options, args
1259

    
1260

    
1261
def _CheckArguments(cmd, args_def, args):
1262
  """Verifies the arguments using the argument definition.
1263

1264
  Algorithm:
1265

1266
    1. Abort with error if values specified by user but none expected.
1267

1268
    1. For each argument in definition
1269

1270
      1. Keep running count of minimum number of values (min_count)
1271
      1. Keep running count of maximum number of values (max_count)
1272
      1. If it has an unlimited number of values
1273

1274
        1. Abort with error if it's not the last argument in the definition
1275

1276
    1. If last argument has limited number of values
1277

1278
      1. Abort with error if number of values doesn't match or is too large
1279

1280
    1. Abort with error if user didn't pass enough values (min_count)
1281

1282
  """
1283
  if args and not args_def:
1284
    ToStderr("Error: Command %s expects no arguments", cmd)
1285
    return False
1286

    
1287
  min_count = None
1288
  max_count = None
1289
  check_max = None
1290

    
1291
  last_idx = len(args_def) - 1
1292

    
1293
  for idx, arg in enumerate(args_def):
1294
    if min_count is None:
1295
      min_count = arg.min
1296
    elif arg.min is not None:
1297
      min_count += arg.min
1298

    
1299
    if max_count is None:
1300
      max_count = arg.max
1301
    elif arg.max is not None:
1302
      max_count += arg.max
1303

    
1304
    if idx == last_idx:
1305
      check_max = (arg.max is not None)
1306

    
1307
    elif arg.max is None:
1308
      raise errors.ProgrammerError("Only the last argument can have max=None")
1309

    
1310
  if check_max:
1311
    # Command with exact number of arguments
1312
    if (min_count is not None and max_count is not None and
1313
        min_count == max_count and len(args) != min_count):
1314
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1315
      return False
1316

    
1317
    # Command with limited number of arguments
1318
    if max_count is not None and len(args) > max_count:
1319
      ToStderr("Error: Command %s expects only %d argument(s)",
1320
               cmd, max_count)
1321
      return False
1322

    
1323
  # Command with some required arguments
1324
  if min_count is not None and len(args) < min_count:
1325
    ToStderr("Error: Command %s expects at least %d argument(s)",
1326
             cmd, min_count)
1327
    return False
1328

    
1329
  return True
1330

    
1331

    
1332
def SplitNodeOption(value):
1333
  """Splits the value of a --node option.
1334

1335
  """
1336
  if value and ':' in value:
1337
    return value.split(':', 1)
1338
  else:
1339
    return (value, None)
1340

    
1341

    
1342
def CalculateOSNames(os_name, os_variants):
1343
  """Calculates all the names an OS can be called, according to its variants.
1344

1345
  @type os_name: string
1346
  @param os_name: base name of the os
1347
  @type os_variants: list or None
1348
  @param os_variants: list of supported variants
1349
  @rtype: list
1350
  @return: list of valid names
1351

1352
  """
1353
  if os_variants:
1354
    return ['%s+%s' % (os_name, v) for v in os_variants]
1355
  else:
1356
    return [os_name]
1357

    
1358

    
1359
def ParseFields(selected, default):
1360
  """Parses the values of "--field"-like options.
1361

1362
  @type selected: string or None
1363
  @param selected: User-selected options
1364
  @type default: list
1365
  @param default: Default fields
1366

1367
  """
1368
  if selected is None:
1369
    return default
1370

    
1371
  if selected.startswith("+"):
1372
    return default + selected[1:].split(",")
1373

    
1374
  return selected.split(",")
1375

    
1376

    
1377
UsesRPC = rpc.RunWithRPC
1378

    
1379

    
1380
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer


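# Illustrative usage (not part of the original module): a custom three-way
# question; the last entry doubles as the default answer when no tty is
# available.
#
#   choice = AskUser("Replace the disk?",
#                    [("y", True, "Replace the disk"),
#                     ("a", "all", "Replace all disks"),
#                     ("n", False, "Do not replace any disk")])

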
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


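# Illustrative usage (not part of the original module): submitting a job and
# polling for its result are separate steps; "op" stands for any opcode
# instance built by the calling script.
#
#   job_id = SendJob([op])
#   results = PollJob(job_id)

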
def GenericPollJob(job_id, cbs, report_cbs):
1467
  """Generic job-polling function.
1468

1469
  @type job_id: number
1470
  @param job_id: Job ID
1471
  @type cbs: Instance of L{JobPollCbBase}
1472
  @param cbs: Data callbacks
1473
  @type report_cbs: Instance of L{JobPollReportCbBase}
1474
  @param report_cbs: Reporting callbacks
1475

1476
  """
1477
  prev_job_info = None
1478
  prev_logmsg_serial = None
1479

    
1480
  status = None
1481

    
1482
  while True:
1483
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1484
                                      prev_logmsg_serial)
1485
    if not result:
1486
      # job not found, go away!
1487
      raise errors.JobLost("Job with id %s lost" % job_id)
1488

    
1489
    if result == constants.JOB_NOTCHANGED:
1490
      report_cbs.ReportNotChanged(job_id, status)
1491

    
1492
      # Wait again
1493
      continue
1494

    
1495
    # Split result, a tuple of (field values, log entries)
1496
    (job_info, log_entries) = result
1497
    (status, ) = job_info
1498

    
1499
    if log_entries:
1500
      for log_entry in log_entries:
1501
        (serial, timestamp, log_type, message) = log_entry
1502
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1503
                                    log_type, message)
1504
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1505

    
1506
    # TODO: Handle canceled and archived jobs
1507
    elif status in (constants.JOB_STATUS_SUCCESS,
1508
                    constants.JOB_STATUS_ERROR,
1509
                    constants.JOB_STATUS_CANCELING,
1510
                    constants.JOB_STATUS_CANCELED):
1511
      break
1512

    
1513
    prev_job_info = job_info
1514

    
1515
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1516
  if not jobs:
1517
    raise errors.JobLost("Job with id %s lost" % job_id)
1518

    
1519
  status, opstatus, result = jobs[0]
1520

    
1521
  if status == constants.JOB_STATUS_SUCCESS:
1522
    return result
1523

    
1524
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1525
    raise errors.OpExecError("Job was canceled")
1526

    
1527
  has_ok = False
1528
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1529
    if status == constants.OP_STATUS_SUCCESS:
1530
      has_ok = True
1531
    elif status == constants.OP_STATUS_ERROR:
1532
      errors.MaybeRaise(msg)
1533

    
1534
      if has_ok:
1535
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1536
                                 (idx, msg))
1537

    
1538
      raise errors.OpExecError(str(msg))
1539

    
1540
  # default failure mode
1541
  raise errors.OpExecError(result)
1542

    
1543

    
1544
class JobPollCbBase:
1545
  """Base class for L{GenericPollJob} callbacks.
1546

1547
  """
1548
  def __init__(self):
1549
    """Initializes this class.
1550

1551
    """
1552

    
1553
  def WaitForJobChangeOnce(self, job_id, fields,
1554
                           prev_job_info, prev_log_serial):
1555
    """Waits for changes on a job.
1556

1557
    """
1558
    raise NotImplementedError()
1559

    
1560
  def QueryJobs(self, job_ids, fields):
1561
    """Returns the selected fields for the selected job IDs.
1562

1563
    @type job_ids: list of numbers
1564
    @param job_ids: Job IDs
1565
    @type fields: list of strings
1566
    @param fields: Fields
1567

1568
    """
1569
    raise NotImplementedError()
1570

    
1571

    
1572
class JobPollReportCbBase:
1573
  """Base class for L{GenericPollJob} reporting callbacks.
1574

1575
  """
1576
  def __init__(self):
1577
    """Initializes this class.
1578

1579
    """
1580

    
1581
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1582
    """Handles a log message.
1583

1584
    """
1585
    raise NotImplementedError()
1586

    
1587
  def ReportNotChanged(self, job_id, status):
1588
    """Called for if a job hasn't changed in a while.
1589

1590
    @type job_id: number
1591
    @param job_id: Job ID
1592
    @type status: string or None
1593
    @param status: Job status if available
1594

1595
    """
1596
    raise NotImplementedError()
1597

    
1598

    
1599
class _LuxiJobPollCb(JobPollCbBase):
1600
  def __init__(self, cl):
1601
    """Initializes this class.
1602

1603
    """
1604
    JobPollCbBase.__init__(self)
1605
    self.cl = cl
1606

    
1607
  def WaitForJobChangeOnce(self, job_id, fields,
1608
                           prev_job_info, prev_log_serial):
1609
    """Waits for changes on a job.
1610

1611
    """
1612
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1613
                                        prev_job_info, prev_log_serial)
1614

    
1615
  def QueryJobs(self, job_ids, fields):
1616
    """Returns the selected fields for the selected job IDs.
1617

1618
    """
1619
    return self.cl.QueryJobs(job_ids, fields)
1620

    
1621

    
1622
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1623
  def __init__(self, feedback_fn):
1624
    """Initializes this class.
1625

1626
    """
1627
    JobPollReportCbBase.__init__(self)
1628

    
1629
    self.feedback_fn = feedback_fn
1630

    
1631
    assert callable(feedback_fn)
1632

    
1633
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1634
    """Handles a log message.
1635

1636
    """
1637
    self.feedback_fn((timestamp, log_type, log_msg))
1638

    
1639
  def ReportNotChanged(self, job_id, status):
1640
    """Called if a job hasn't changed in a while.
1641

1642
    """
1643
    # Ignore
1644

    
1645

    
1646
class StdioJobPollReportCb(JobPollReportCbBase):
1647
  def __init__(self):
1648
    """Initializes this class.
1649

1650
    """
1651
    JobPollReportCbBase.__init__(self)
1652

    
1653
    self.notified_queued = False
1654
    self.notified_waitlock = False
1655

    
1656
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1657
    """Handles a log message.
1658

1659
    """
1660
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1661
             FormatLogMessage(log_type, log_msg))
1662

    
1663
  def ReportNotChanged(self, job_id, status):
1664
    """Called if a job hasn't changed in a while.
1665

1666
    """
1667
    if status is None:
1668
      return
1669

    
1670
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1671
      ToStderr("Job %s is waiting in queue", job_id)
1672
      self.notified_queued = True
1673

    
1674
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1675
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1676
      self.notified_waitlock = True
1677

    
1678

    
1679
def FormatLogMessage(log_type, log_msg):
1680
  """Formats a job message according to its type.
1681

1682
  """
1683
  if log_type != constants.ELOG_MESSAGE:
1684
    log_msg = str(log_msg)
1685

    
1686
  return utils.SafeEncode(log_msg)
1687

    
1688

    
1689
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1690
  """Function to poll for the result of a job.
1691

1692
  @type job_id: job identifier
1693
  @param job_id: the job to poll for results
1694
  @type cl: luxi.Client
1695
  @param cl: the luxi client to use for communicating with the master;
1696
             if None, a new client will be created
1697

1698
  """
1699
  if cl is None:
1700
    cl = GetClient()
1701

    
1702
  if reporter is None:
1703
    if feedback_fn:
1704
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1705
    else:
1706
      reporter = StdioJobPollReportCb()
1707
  elif feedback_fn:
1708
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1709

    
1710
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1711

    
1712

    
1713
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1714
  """Legacy function to submit an opcode.
1715

1716
  This is just a simple wrapper over the construction of the processor
1717
  instance. It should be extended to better handle feedback and
1718
  interaction functions.
1719

1720
  """
1721
  if cl is None:
1722
    cl = GetClient()
1723

    
1724
  SetGenericOpcodeOpts([op], opts)
1725

    
1726
  job_id = SendJob([op], cl=cl)
1727

    
1728
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1729
                       reporter=reporter)
1730

    
1731
  return op_results[0]
1732

    
1733

    
1734
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1735
  """Wrapper around SubmitOpCode or SendJob.
1736

1737
  This function will decide, based on the 'opts' parameter, whether to
1738
  submit and wait for the result of the opcode (and return it), or
1739
  whether to just send the job and print its identifier. It is used in
1740
  order to simplify the implementation of the '--submit' option.
1741

1742
  It will also process the opcodes if we're sending them via SendJob
1743
  (otherwise SubmitOpCode does it).
1744

1745
  """
1746
  if opts and opts.submit_only:
1747
    job = [op]
1748
    SetGenericOpcodeOpts(job, opts)
1749
    job_id = SendJob(job, cl=cl)
1750
    raise JobSubmittedException(job_id)
1751
  else:
1752
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1753

    
1754

    
1755
def SetGenericOpcodeOpts(opcode_list, options):
1756
  """Processor for generic options.
1757

1758
  This function updates the given opcodes based on generic command
1759
  line options (like debug, dry-run, etc.).
1760

1761
  @param opcode_list: list of opcodes
1762
  @param options: command line options or None
1763
  @return: None (in-place modification)
1764

1765
  """
1766
  if not options:
1767
    return
1768
  for op in opcode_list:
1769
    op.debug_level = options.debug
1770
    if hasattr(options, "dry_run"):
1771
      op.dry_run = options.dry_run
1772
    if getattr(options, "priority", None) is not None:
1773
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1774

    
1775

    
1776
def GetClient():
1777
  # TODO: Cache object?
1778
  try:
1779
    client = luxi.Client()
1780
  except luxi.NoMasterError:
1781
    ss = ssconf.SimpleStore()
1782

    
1783
    # Try to read ssconf file
1784
    try:
1785
      ss.GetMasterNode()
1786
    except errors.ConfigurationError:
1787
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1788
                                 " not part of a cluster")
1789

    
1790
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1791
    if master != myself:
1792
      raise errors.OpPrereqError("This is not the master node, please connect"
1793
                                 " to node '%s' and rerun the command" %
1794
                                 master)
1795
    raise
1796
  return client
1797

    
1798

    
1799
def FormatError(err):
1800
  """Return a formatted error message for a given error.
1801

1802
  This function takes an exception instance and returns a tuple
1803
  consisting of two values: first, the recommended exit code, and
1804
  second, a string describing the error message (not
1805
  newline-terminated).
1806

1807
  """
1808
  retcode = 1
1809
  obuf = StringIO()
1810
  msg = str(err)
1811
  if isinstance(err, errors.ConfigurationError):
1812
    txt = "Corrupt configuration file: %s" % msg
1813
    logging.error(txt)
1814
    obuf.write(txt + "\n")
1815
    obuf.write("Aborting.")
1816
    retcode = 2
1817
  elif isinstance(err, errors.HooksAbort):
1818
    obuf.write("Failure: hooks execution failed:\n")
1819
    for node, script, out in err.args[0]:
1820
      if out:
1821
        obuf.write("  node: %s, script: %s, output: %s\n" %
1822
                   (node, script, out))
1823
      else:
1824
        obuf.write("  node: %s, script: %s (no output)\n" %
1825
                   (node, script))
1826
  elif isinstance(err, errors.HooksFailure):
1827
    obuf.write("Failure: hooks general failure: %s" % msg)
1828
  elif isinstance(err, errors.ResolverError):
1829
    this_host = netutils.Hostname.GetSysName()
1830
    if err.args[0] == this_host:
1831
      msg = "Failure: can't resolve my own hostname ('%s')"
1832
    else:
1833
      msg = "Failure: can't resolve hostname '%s'"
1834
    obuf.write(msg % err.args[0])
1835
  elif isinstance(err, errors.OpPrereqError):
1836
    if len(err.args) == 2:
1837
      obuf.write("Failure: prerequisites not met for this"
1838
               " operation:\nerror type: %s, error details:\n%s" %
1839
                 (err.args[1], err.args[0]))
1840
    else:
1841
      obuf.write("Failure: prerequisites not met for this"
1842
                 " operation:\n%s" % msg)
1843
  elif isinstance(err, errors.OpExecError):
1844
    obuf.write("Failure: command execution error:\n%s" % msg)
1845
  elif isinstance(err, errors.TagError):
1846
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1847
  elif isinstance(err, errors.JobQueueDrainError):
1848
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1849
               " accept new requests\n")
1850
  elif isinstance(err, errors.JobQueueFull):
1851
    obuf.write("Failure: the job queue is full and doesn't accept new"
1852
               " job submissions until old jobs are archived\n")
1853
  elif isinstance(err, errors.TypeEnforcementError):
1854
    obuf.write("Parameter Error: %s" % msg)
1855
  elif isinstance(err, errors.ParameterError):
1856
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1857
  elif isinstance(err, luxi.NoMasterError):
1858
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1859
               " and listening for connections?")
1860
  elif isinstance(err, luxi.TimeoutError):
1861
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1862
               " been submitted and will continue to run even if the call"
1863
               " timed out. Useful commands in this situation are \"gnt-job"
1864
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1865
    obuf.write(msg)
1866
  elif isinstance(err, luxi.PermissionError):
1867
    obuf.write("It seems you don't have permissions to connect to the"
1868
               " master daemon.\nPlease retry as a different user.")
1869
  elif isinstance(err, luxi.ProtocolError):
1870
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1871
               "%s" % msg)
1872
  elif isinstance(err, errors.JobLost):
1873
    obuf.write("Error checking job status: %s" % msg)
1874
  elif isinstance(err, errors.GenericError):
1875
    obuf.write("Unhandled Ganeti error: %s" % msg)
1876
  elif isinstance(err, JobSubmittedException):
1877
    obuf.write("JobID: %s\n" % err.args[0])
1878
    retcode = 0
1879
  else:
1880
    obuf.write("Unhandled exception: %s" % msg)
1881
  return retcode, obuf.getvalue().rstrip('\n')
1882

    
1883

    
1884
def GenericMain(commands, override=None, aliases=None):
1885
  """Generic main function for all the gnt-* commands.
1886

1887
  Arguments:
1888
    - commands: a dictionary with a special structure, see the design doc
1889
                for command line handling.
1890
    - override: if not None, we expect a dictionary with keys that will
1891
                override command line options; this can be used to pass
1892
                options from the scripts to generic functions
1893
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1894

1895
  """
1896
  # save the program name and the entire command line for later logging
1897
  if sys.argv:
1898
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1899
    if len(sys.argv) >= 2:
1900
      binary += " " + sys.argv[1]
1901
      old_cmdline = " ".join(sys.argv[2:])
1902
    else:
1903
      old_cmdline = ""
1904
  else:
1905
    binary = "<unknown program>"
1906
    old_cmdline = ""
1907

    
1908
  if aliases is None:
1909
    aliases = {}
1910

    
1911
  try:
1912
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1913
  except errors.ParameterError, err:
1914
    result, err_msg = FormatError(err)
1915
    ToStderr(err_msg)
1916
    return 1
1917

    
1918
  if func is None: # parse error
1919
    return 1
1920

    
1921
  if override is not None:
1922
    for key, val in override.iteritems():
1923
      setattr(options, key, val)
1924

    
1925
  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
1926
                     stderr_logging=True, program=binary)
1927

    
1928
  if old_cmdline:
1929
    logging.info("run with arguments '%s'", old_cmdline)
1930
  else:
1931
    logging.info("run with no arguments")
1932

    
1933
  try:
1934
    result = func(options, args)
1935
  except (errors.GenericError, luxi.ProtocolError,
1936
          JobSubmittedException), err:
1937
    result, err_msg = FormatError(err)
1938
    logging.exception("Error during command processing")
1939
    ToStderr(err_msg)
1940

    
1941
  return result
1942

    
1943

    
1944
def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics


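# Illustrative usage (not part of the original module): the option parser
# collects each "--net" value as an (index, settings) pair, e.g.
# "--net 0:mac=auto --net 1:link=br0" roughly becomes the input below.
#
#   ParseNicOption([("0", {"mac": "auto"}), ("1", {"link": "br0"})])
#     => [{"mac": "auto"}, {"link": "br0"}]

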
def GenericInstanceCreate(mode, opts, args):
1969
  """Add an instance to the cluster via either creation or import.
1970

1971
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1972
  @param opts: the command line options selected by the user
1973
  @type args: list
1974
  @param args: should contain only one element, the new instance name
1975
  @rtype: int
1976
  @return: the desired exit code
1977

1978
  """
1979
  instance = args[0]
1980

    
1981
  (pnode, snode) = SplitNodeOption(opts.node)
1982

    
1983
  hypervisor = None
1984
  hvparams = {}
1985
  if opts.hypervisor:
1986
    hypervisor, hvparams = opts.hypervisor
1987

    
1988
  if opts.nics:
1989
    nics = ParseNicOption(opts.nics)
1990
  elif opts.no_nics:
1991
    # no nics
1992
    nics = []
1993
  elif mode == constants.INSTANCE_CREATE:
1994
    # default of one nic, all auto
1995
    nics = [{}]
1996
  else:
1997
    # mode == import
1998
    nics = []
1999

    
2000
  if opts.disk_template == constants.DT_DISKLESS:
2001
    if opts.disks or opts.sd_size is not None:
2002
      raise errors.OpPrereqError("Diskless instance but disk"
2003
                                 " information passed")
2004
    disks = []
2005
  else:
2006
    if (not opts.disks and not opts.sd_size
2007
        and mode == constants.INSTANCE_CREATE):
2008
      raise errors.OpPrereqError("No disk information specified")
2009
    if opts.disks and opts.sd_size is not None:
2010
      raise errors.OpPrereqError("Please use either the '--disk' or"
2011
                                 " '-s' option")
2012
    if opts.sd_size is not None:
2013
      opts.disks = [(0, {"size": opts.sd_size})]
2014

    
2015
    if opts.disks:
2016
      try:
2017
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2018
      except ValueError, err:
2019
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2020
      disks = [{}] * disk_max
2021
    else:
2022
      disks = []
2023
    for didx, ddict in opts.disks:
2024
      didx = int(didx)
2025
      if not isinstance(ddict, dict):
2026
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2027
        raise errors.OpPrereqError(msg)
2028
      elif "size" in ddict:
2029
        if "adopt" in ddict:
2030
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2031
                                     " (disk %d)" % didx)
2032
        try:
2033
          ddict["size"] = utils.ParseUnit(ddict["size"])
2034
        except ValueError, err:
2035
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2036
                                     (didx, err))
2037
      elif "adopt" in ddict:
2038
        if mode == constants.INSTANCE_IMPORT:
2039
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2040
                                     " import")
2041
        ddict["size"] = 0
2042
      else:
2043
        raise errors.OpPrereqError("Missing size or adoption source for"
2044
                                   " disk %d" % didx)
2045
      disks[didx] = ddict
2046

    
2047
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2048
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2049

    
2050
  if mode == constants.INSTANCE_CREATE:
2051
    start = opts.start
2052
    os_type = opts.os
2053
    force_variant = opts.force_variant
2054
    src_node = None
2055
    src_path = None
2056
    no_install = opts.no_install
2057
    identify_defaults = False
2058
  elif mode == constants.INSTANCE_IMPORT:
2059
    start = False
2060
    os_type = None
2061
    force_variant = False
2062
    src_node = opts.src_node
2063
    src_path = opts.src_dir
2064
    no_install = None
2065
    identify_defaults = opts.identify_defaults
2066
  else:
2067
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2068

    
2069
  op = opcodes.OpInstanceCreate(instance_name=instance,
2070
                                disks=disks,
2071
                                disk_template=opts.disk_template,
2072
                                nics=nics,
2073
                                pnode=pnode, snode=snode,
2074
                                ip_check=opts.ip_check,
2075
                                name_check=opts.name_check,
2076
                                wait_for_sync=opts.wait_for_sync,
2077
                                file_storage_dir=opts.file_storage_dir,
2078
                                file_driver=opts.file_driver,
2079
                                iallocator=opts.iallocator,
2080
                                hypervisor=hypervisor,
2081
                                hvparams=hvparams,
2082
                                beparams=opts.beparams,
2083
                                osparams=opts.osparams,
2084
                                mode=mode,
2085
                                start=start,
2086
                                os_type=os_type,
2087
                                force_variant=force_variant,
2088
                                src_node=src_node,
2089
                                src_path=src_path,
2090
                                no_install=no_install,
2091
                                identify_defaults=identify_defaults)
2092

    
2093
  SubmitOrSend(op, opts)
2094
  return 0
2095

    
2096

    
2097
class _RunWhileClusterStoppedHelper:
2098
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2099

2100
  """
2101
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2102
    """Initializes this class.
2103

2104
    @type feedback_fn: callable
2105
    @param feedback_fn: Feedback function
2106
    @type cluster_name: string
2107
    @param cluster_name: Cluster name
2108
    @type master_node: string
2109
    @param master_node: Master node name
2110
    @type online_nodes: list
2111
    @param online_nodes: List of names of online nodes
2112

2113
    """
2114
    self.feedback_fn = feedback_fn
2115
    self.cluster_name = cluster_name
2116
    self.master_node = master_node
2117
    self.online_nodes = online_nodes
2118

    
2119
    self.ssh = ssh.SshRunner(self.cluster_name)
2120

    
2121
    self.nonmaster_nodes = [name for name in online_nodes
2122
                            if name != master_node]
2123

    
2124
    assert self.master_node not in self.nonmaster_nodes
2125

    
2126
  def _RunCmd(self, node_name, cmd):
2127
    """Runs a command on the local or a remote machine.
2128

2129
    @type node_name: string
2130
    @param node_name: Machine name
2131
    @type cmd: list
2132
    @param cmd: Command
2133

2134
    """
2135
    if node_name is None or node_name == self.master_node:
2136
      # No need to use SSH
2137
      result = utils.RunCmd(cmd)
2138
    else:
2139
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2140

    
2141
    if result.failed:
2142
      errmsg = ["Failed to run command %s" % result.cmd]
2143
      if node_name:
2144
        errmsg.append("on node %s" % node_name)
2145
      errmsg.append(": exitcode %s and error %s" %
2146
                    (result.exit_code, result.output))
2147
      raise errors.OpExecError(" ".join(errmsg))
2148

    
2149
  def Call(self, fn, *args):
2150
    """Call function while all daemons are stopped.
2151

2152
    @type fn: callable
2153
    @param fn: Function to be called
2154

2155
    """
2156
    # Pause watcher by acquiring an exclusive lock on watcher state file
2157
    self.feedback_fn("Blocking watcher")
2158
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2159
    try:
2160
      # TODO: Currently, this just blocks. There's no timeout.
2161
      # TODO: Should it be a shared lock?
2162
      watcher_block.Exclusive(blocking=True)
2163

    
2164
      # Stop master daemons, so that no new jobs can come in and all running
2165
      # ones are finished
2166
      self.feedback_fn("Stopping master daemons")
2167
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2168
      try:
2169
        # Stop daemons on all nodes
2170
        for node_name in self.online_nodes:
2171
          self.feedback_fn("Stopping daemons on %s" % node_name)
2172
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2173

    
2174
        # All daemons are shut down now
2175
        try:
2176
          return fn(self, *args)
2177
        except Exception, err:
2178
          _, errmsg = FormatError(err)
2179
          logging.exception("Caught exception")
2180
          self.feedback_fn(errmsg)
2181
          raise
2182
      finally:
2183
        # Start cluster again, master node last
2184
        for node_name in self.nonmaster_nodes + [self.master_node]:
2185
          self.feedback_fn("Starting daemons on %s" % node_name)
2186
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2187
    finally:
2188
      # Resume watcher
2189
      watcher_block.Close()
2190

    
2191

    
2192
def RunWhileClusterStopped(feedback_fn, fn, *args):
2193
  """Calls a function while all cluster daemons are stopped.
2194

2195
  @type feedback_fn: callable
2196
  @param feedback_fn: Feedback function
2197
  @type fn: callable
2198
  @param fn: Function to be called when daemons are stopped
2199

2200
  """
2201
  feedback_fn("Gathering cluster information")
2202

    
2203
  # This ensures we're running on the master daemon
2204
  cl = GetClient()
2205

    
2206
  (cluster_name, master_node) = \
2207
    cl.QueryConfigValues(["cluster_name", "master_node"])
2208

    
2209
  online_nodes = GetOnlineNodes([], cl=cl)
2210

    
2211
  # Don't keep a reference to the client. The master daemon will go away.
2212
  del cl
2213

    
2214
  assert master_node in online_nodes
2215

    
2216
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2217
                                       online_nodes).Call(fn, *args)
2218

    
2219

    
2220
def GenerateTable(headers, fields, separator, data,
2221
                  numfields=None, unitfields=None,
2222
                  units=None):
2223
  """Prints a table with headers and different fields.
2224

2225
  @type headers: dict
2226
  @param headers: dictionary mapping field names to headers for
2227
      the table
2228
  @type fields: list
2229
  @param fields: the field names corresponding to each row in
2230
      the data field
2231
  @param separator: the separator to be used; if this is None,
2232
      the default 'smart' algorithm is used which computes optimal
2233
      field width, otherwise just the separator is used between
2234
      each field
2235
  @type data: list
2236
  @param data: a list of lists, each sublist being one row to be output
2237
  @type numfields: list
2238
  @param numfields: a list with the fields that hold numeric
2239
      values and thus should be right-aligned
2240
  @type unitfields: list
2241
  @param unitfields: a list with the fields that hold numeric
2242
      values that should be formatted with the units field
2243
  @type units: string or None
2244
  @param units: the units we should use for formatting, or None for
2245
      automatic choice (human-readable for non-separator usage, otherwise
2246
      megabytes); this is a one-letter string
2247

2248
  """
2249
  if units is None:
2250
    if separator:
2251
      units = "m"
2252
    else:
2253
      units = "h"
2254

    
2255
  if numfields is None:
2256
    numfields = []
2257
  if unitfields is None:
2258
    unitfields = []
2259

    
2260
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2261
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2262

    
2263
  format_fields = []
2264
  for field in fields:
2265
    if headers and field not in headers:
2266
      # TODO: handle better unknown fields (either revert to old
2267
      # style of raising exception, or deal more intelligently with
2268
      # variable fields)
2269
      headers[field] = field
2270
    if separator is not None:
2271
      format_fields.append("%s")
2272
    elif numfields.Matches(field):
2273
      format_fields.append("%*s")
2274
    else:
2275
      format_fields.append("%-*s")
2276

    
2277
  if separator is None:
2278
    mlens = [0 for name in fields]
2279
    format_str = ' '.join(format_fields)
2280
  else:
2281
    format_str = separator.replace("%", "%%").join(format_fields)
2282

    
2283
  for row in data:
2284
    if row is None:
2285
      continue
2286
    for idx, val in enumerate(row):
2287
      if unitfields.Matches(fields[idx]):
2288
        try:
2289
          val = int(val)
2290
        except (TypeError, ValueError):
2291
          pass
2292
        else:
2293
          val = row[idx] = utils.FormatUnit(val, units)
2294
      val = row[idx] = str(val)
2295
      if separator is None:
2296
        mlens[idx] = max(mlens[idx], len(val))
2297

    
2298
  result = []
2299
  if headers:
2300
    args = []
2301
    for idx, name in enumerate(fields):
2302
      hdr = headers[name]
2303
      if separator is None:
2304
        mlens[idx] = max(mlens[idx], len(hdr))
2305
        args.append(mlens[idx])
2306
      args.append(hdr)
2307
    result.append(format_str % tuple(args))
2308

    
2309
  if separator is None:
2310
    assert len(mlens) == len(fields)
2311

    
2312
    if fields and not numfields.Matches(fields[-1]):
2313
      mlens[-1] = 0
2314

    
2315
  for line in data:
2316
    args = []
2317
    if line is None:
2318
      line = ['-' for _ in fields]
2319
    for idx in range(len(fields)):
2320
      if separator is None:
2321
        args.append(mlens[idx])
2322
      args.append(line[idx])
2323
    result.append(format_str % tuple(args))
2324

    
2325
  return result
2326

    
2327

    
2328
def _FormatBool(value):
2329
  """Formats a boolean value as a string.
2330

2331
  """
2332
  if value:
2333
    return "Y"
2334
  return "N"
2335

    
2336

    
2337
#: Default formatting for query results; (callback, align right)
2338
_DEFAULT_FORMAT_QUERY = {
2339
  constants.QFT_TEXT: (str, False),
2340
  constants.QFT_BOOL: (_FormatBool, False),
2341
  constants.QFT_NUMBER: (str, True),
2342
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2343
  constants.QFT_OTHER: (str, False),
2344
  constants.QFT_UNKNOWN: (str, False),
2345
  }
2346

    
2347

    
2348
def _GetColumnFormatter(fdef, override, unit):
2349
  """Returns formatting function for a field.
2350

2351
  @type fdef: L{objects.QueryFieldDefinition}
2352
  @type override: dict
2353
  @param override: Dictionary for overriding field formatting functions,
2354
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2355
  @type unit: string
2356
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2357
  @rtype: tuple; (callable, bool)
2358
  @return: Returns the function to format a value (takes one parameter) and a
2359
    boolean for aligning the value on the right-hand side
2360

2361
  """
2362
  fmt = override.get(fdef.name, None)
2363
  if fmt is not None:
2364
    return fmt
2365

    
2366
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2367

    
2368
  if fdef.kind == constants.QFT_UNIT:
2369
    # Can't keep this information in the static dictionary
2370
    return (lambda value: utils.FormatUnit(value, unit), True)
2371

    
2372
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2373
  if fmt is not None:
2374
    return fmt
2375

    
2376
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2377

    
2378

    
2379
class _QueryColumnFormatter:
2380
  """Callable class for formatting fields of a query.
2381

2382
  """
2383
  def __init__(self, fn, status_fn):
2384
    """Initializes this class.
2385

2386
    @type fn: callable
2387
    @param fn: Formatting function
2388
    @type status_fn: callable
2389
    @param status_fn: Function to report fields' status
2390

2391
    """
2392
    self._fn = fn
2393
    self._status_fn = status_fn
2394

    
2395
  def __call__(self, data):
2396
    """Returns a field's string representation.
2397

2398
    """
2399
    (status, value) = data
2400

    
2401
    # Report status
2402
    self._status_fn(status)
2403

    
2404
    if status == constants.RS_NORMAL:
2405
      return self._fn(value)
2406

    
2407
    assert value is None, \
2408
           "Found value %r for abnormal status %s" % (value, status)
2409

    
2410
    return FormatResultError(status)
2411

    
2412

    
2413
def FormatResultError(status):
2414
  """Formats result status other than L{constants.RS_NORMAL}.
2415

2416
  @param status: The result status
2417
  @return: Text of result status
2418

2419
  """
2420
  assert status != constants.RS_NORMAL, \
2421
         "FormatResultError called with status equals to constants.RS_NORMAL"
2422
  try:
2423
    return _RSTATUS_TO_TEXT[status]
2424
  except KeyError:
2425
    raise NotImplementedError("Unknown status %s" % status)
2426

    
2427

    
2428
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2429
                      header=False):
2430
  """Formats data in L{objects.QueryResponse}.
2431

2432
  @type result: L{objects.QueryResponse}
2433
  @param result: result of query operation
2434
  @type unit: string
2435
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2436
    see L{utils.text.FormatUnit}
2437
  @type format_override: dict
2438
  @param format_override: Dictionary for overriding field formatting functions,
2439
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2440
  @type separator: string or None
2441
  @param separator: String used to separate fields
2442
  @type header: bool
2443
  @param header: Whether to output header row
2444

2445
  """
2446
  if unit is None:
2447
    if separator:
2448
      unit = "m"
2449
    else:
2450
      unit = "h"
2451

    
2452
  if format_override is None:
2453
    format_override = {}
2454

    
2455
  stats = dict.fromkeys(constants.RS_ALL, 0)
2456

    
2457
  def _RecordStatus(status):
2458
    if status in stats:
2459
      stats[status] += 1
2460

    
2461
  columns = []
2462
  for fdef in result.fields:
2463
    assert fdef.title and fdef.name
2464
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2465
    columns.append(TableColumn(fdef.title,
2466
                               _QueryColumnFormatter(fn, _RecordStatus),
2467
                               align_right))
2468

    
2469
  table = FormatTable(result.data, columns, header, separator)
2470

    
2471
  # Collect statistics
2472
  assert len(stats) == len(constants.RS_ALL)
2473
  assert compat.all(count >= 0 for count in stats.values())
2474

    
2475
  # Determine overall status. If there was no data, unknown fields must be
2476
  # detected via the field definitions.
2477
  if (stats[constants.RS_UNKNOWN] or
2478
      (not result.data and _GetUnknownFields(result.fields))):
2479
    status = QR_UNKNOWN
2480
  elif compat.any(count > 0 for key, count in stats.items()
2481
                  if key != constants.RS_NORMAL):
2482
    status = QR_INCOMPLETE
2483
  else:
2484
    status = QR_NORMAL
2485

    
2486
  return (status, table)
2487

    
2488

    
2489
def _GetUnknownFields(fdefs):
2490
  """Returns list of unknown fields included in C{fdefs}.
2491

2492
  @type fdefs: list of L{objects.QueryFieldDefinition}
2493

2494
  """
2495
  return [fdef for fdef in fdefs
2496
          if fdef.kind == constants.QFT_UNKNOWN]
2497

    
2498

    
2499
def _WarnUnknownFields(fdefs):
2500
  """Prints a warning to stderr if a query included unknown fields.
2501

2502
  @type fdefs: list of L{objects.QueryFieldDefinition}
2503

2504
  """
2505
  unknown = _GetUnknownFields(fdefs)
2506
  if unknown:
2507
    ToStderr("Warning: Queried for unknown fields %s",
2508
             utils.CommaJoin(fdef.name for fdef in unknown))
2509
    return True
2510

    
2511
  return False
2512

    
2513

    
2514
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2515
                format_override=None):
2516
  """Generic implementation for listing all items of a resource.
2517

2518
  @param resource: One of L{constants.QR_OP_LUXI}
2519
  @type fields: list of strings
2520
  @param fields: List of fields to query for
2521
  @type names: list of strings
2522
  @param names: Names of items to query for
2523
  @type unit: string or None
2524
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2525
    None for automatic choice (human-readable for non-separator usage,
2526
    otherwise megabytes); this is a one-letter string
2527
  @type separator: string or None
2528
  @param separator: String used to separate fields
2529
  @type header: bool
2530
  @param header: Whether to show header row
2531
  @type format_override: dict
2532
  @param format_override: Dictionary for overriding field formatting functions,
2533
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2534

2535
  """
2536
  if cl is None:
2537
    cl = GetClient()
2538

    
2539
  if not names:
2540
    names = None
2541

    
2542
  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
2543

    
2544
  found_unknown = _WarnUnknownFields(response.fields)
2545

    
2546
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2547
                                     header=header,
2548
                                     format_override=format_override)
2549

    
2550
  for line in data:
2551
    ToStdout(line)
2552

    
2553
  assert ((found_unknown and status == QR_UNKNOWN) or
2554
          (not found_unknown and status != QR_UNKNOWN))
2555

    
2556
  if status == QR_UNKNOWN:
2557
    return constants.EXIT_UNKNOWN_FIELD
2558

    
2559
  # TODO: Should the list command fail if not all data could be collected?
2560
  return constants.EXIT_SUCCESS
2561

    
2562

    
2563
def GenericListFields(resource, fields, separator, header, cl=None):
2564
  """Generic implementation for listing fields for a resource.
2565

2566
  @param resource: One of L{constants.QR_OP_LUXI}
2567
  @type fields: list of strings
2568
  @param fields: List of fields to query for
2569
  @type separator: string or None
2570
  @param separator: String used to separate fields
2571
  @type header: bool
2572
  @param header: Whether to show header row
2573

2574
  """
2575
  if cl is None:
2576
    cl = GetClient()
2577

    
2578
  if not fields:
2579
    fields = None
2580

    
2581
  response = cl.QueryFields(resource, fields)
2582

    
2583
  found_unknown = _WarnUnknownFields(response.fields)
2584

    
2585
  columns = [
2586
    TableColumn("Name", str, False),
2587
    TableColumn("Title", str, False),
2588
    # TODO: Add field description to master daemon
2589
    ]
2590

    
2591
  rows = [[fdef.name, fdef.title] for fdef in response.fields]
2592

    
2593
  for line in FormatTable(rows, columns, header, separator):
2594
    ToStdout(line)
2595

    
2596
  if found_unknown:
2597
    return constants.EXIT_UNKNOWN_FIELD
2598

    
2599
  return constants.EXIT_SUCCESS
2600

    
2601

    
2602
class TableColumn:
2603
  """Describes a column for L{FormatTable}.
2604

2605
  """
2606
  def __init__(self, title, fn, align_right):
2607
    """Initializes this class.
2608

2609
    @type title: string
2610
    @param title: Column title
2611
    @type fn: callable
2612
    @param fn: Formatting function
2613
    @type align_right: bool
2614
    @param align_right: Whether to align values on the right-hand side
2615

2616
    """
2617
    self.title = title
2618
    self.format = fn
2619
    self.align_right = align_right
2620

    
2621

    
2622
def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


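# Illustrative behaviour (not part of the original module):
#
#   _GetColFormatString(10, True)  => "%10s"   (right-aligned field)
#   _GetColFormatString(10, False) => "%-10s"  (left-aligned field)

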
def FormatTable(rows, columns, header, separator):
2635
  """Formats data as a table.
2636

2637
  @type rows: list of lists
2638
  @param rows: Row data, one list per row
2639
  @type columns: list of L{TableColumn}
2640
  @param columns: Column descriptions
2641
  @type header: bool
2642
  @param header: Whether to show header row
2643
  @type separator: string or None
2644
  @param separator: String used to separate columns
2645

2646
  """
2647
  if header:
2648
    data = [[col.title for col in columns]]
2649
    colwidth = [len(col.title) for col in columns]
2650
  else:
2651
    data = []
2652
    colwidth = [0 for _ in columns]
2653

    
2654
  # Format row data
2655
  for row in rows:
2656
    assert len(row) == len(columns)
2657

    
2658
    formatted = [col.format(value) for value, col in zip(row, columns)]
2659

    
2660
    if separator is None:
2661
      # Update column widths
2662
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2663
        # Modifying a list's items while iterating is fine
2664
        colwidth[idx] = max(oldwidth, len(value))
2665

    
2666
    data.append(formatted)
2667

    
2668
  if separator is not None:
2669
    # Return early if a separator is used
2670
    return [separator.join(row) for row in data]
2671

    
2672
  if columns and not columns[-1].align_right:
2673
    # Avoid unnecessary spaces at end of line
2674
    colwidth[-1] = 0
2675

    
2676
  # Build format string
2677
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2678
                  for col, width in zip(columns, colwidth)])
2679

    
2680
  return [fmt % tuple(row) for row in data]
2681

    
2682

    
2683
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec


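# Illustrative behaviour (not part of the original module; the exact output
# depends on the local timezone):
#
#   FormatTimestamp((1234567890, 123456)) => e.g. "2009-02-13 23:31:30.123456"
#   FormatTimestamp("not a timestamp")    => "?"

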
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value


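# Illustrative behaviour (not part of the original module):
#
#   ParseTimespec("30")  => 30      (plain value, taken as seconds)
#   ParseTimespec("2h")  => 7200
#   ParseTimespec("1w")  => 604800
#   ParseTimespec("h")   raises OpPrereqError (only a suffix was passed)

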
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2742
                   filter_master=False):
2743
  """Returns the names of online nodes.
2744

2745
  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.
2747

2748
  @param nodes: if not empty, use only this subset of nodes (minus the
2749
      offline ones)
2750
  @param cl: if not None, luxi client to use
2751
  @type nowarn: boolean
2752
  @param nowarn: by default, this function will output a note with the
2753
      offline nodes that are skipped; if this parameter is True the
2754
      note is not displayed
2755
  @type secondary_ips: boolean
2756
  @param secondary_ips: if True, return the secondary IPs instead of the
2757
      names, useful for doing network traffic over the replication interface
2758
      (if any)
2759
  @type filter_master: boolean
2760
  @param filter_master: if True, do not return the master node in the list
2761
      (useful in coordination with secondary_ips where we cannot check our
2762
      node name against the list)
2763

2764
  """
2765
  if cl is None:
2766
    cl = GetClient()
2767

    
2768
  if secondary_ips:
2769
    name_idx = 2
2770
  else:
2771
    name_idx = 0
2772

    
2773
  if filter_master:
2774
    master_node = cl.QueryConfigValues(["master_node"])[0]
2775
    filter_fn = lambda x: x != master_node
2776
  else:
2777
    filter_fn = lambda _: True
2778

    
2779
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2780
                         use_locking=False)
2781
  offline = [row[0] for row in result if row[1]]
2782
  if offline and not nowarn:
2783
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2784
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2785

    
2786

    
2787
def _ToStream(stream, txt, *args):
2788
  """Write a message to a stream, bypassing the logging system
2789

2790
  @type stream: file object
2791
  @param stream: the file to which we should write
2792
  @type txt: str
2793
  @param txt: the message
2794

2795
  """
2796
  if args:
2797
    args = tuple(args)
2798
    stream.write(txt % args)
2799
  else:
2800
    stream.write(txt)
2801
  stream.write('\n')
2802
  stream.flush()
2803

    
2804

    
2805
def ToStdout(txt, *args):
2806
  """Write a message to stdout only, bypassing the logging system
2807

2808
  This is just a wrapper over _ToStream.
2809

2810
  @type txt: str
2811
  @param txt: the message
2812

2813
  """
2814
  _ToStream(sys.stdout, txt, *args)
2815

    
2816

    
2817
def ToStderr(txt, *args):
2818
  """Write a message to stderr only, bypassing the logging system
2819

2820
  This is just a wrapper over _ToStream.
2821

2822
  @type txt: str
2823
  @param txt: the message
2824

2825
  """
2826
  _ToStream(sys.stderr, txt, *args)
2827

    
2828

    
2829
class JobExecutor(object):
2830
  """Class which manages the submission and execution of multiple jobs.
2831

2832
  Note that instances of this class should not be reused between
2833
  GetResults() calls.
2834

2835
  """
2836
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2837
    self.queue = []
2838
    if cl is None:
2839
      cl = GetClient()
2840
    self.cl = cl
2841
    self.verbose = verbose
2842
    self.jobs = []
2843
    self.opts = opts
2844
    self.feedback_fn = feedback_fn
2845

    
2846
  def QueueJob(self, name, *ops):
2847
    """Record a job for later submit.
2848

2849
    @type name: string
2850
    @param name: a description of the job, will be used in status messages
2851
    """
2852
    SetGenericOpcodeOpts(ops, self.opts)
2853
    self.queue.append((name, ops))
2854

    
2855
  def SubmitPending(self, each=False):
2856
    """Submit all pending jobs.
2857

2858
    """
2859
    if each:
2860
      results = []
2861
      for row in self.queue:
2862
        # SubmitJob will remove the success status, but raise an exception if
2863
        # the submission fails, so we'll notice that anyway.
2864
        results.append([True, self.cl.SubmitJob(row[1])])
2865
    else:
2866
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2867
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2868
                                                            self.queue)):
2869
      self.jobs.append((idx, status, data, name))
2870

    
2871
  def _ChooseJob(self):
2872
    """Choose a non-waiting/queued job to poll next.
2873

2874
    """
2875
    assert self.jobs, "_ChooseJob called with empty job list"
2876

    
2877
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2878
    assert result
2879

    
2880
    for job_data, status in zip(self.jobs, result):
2881
      if (isinstance(status, list) and status and
2882
          status[0] in (constants.JOB_STATUS_QUEUED,
2883
                        constants.JOB_STATUS_WAITLOCK,
2884
                        constants.JOB_STATUS_CANCELING)):
2885
        # job is still present and waiting
2886
        continue
2887
      # good candidate found (either running job or lost job)
2888
      self.jobs.remove(job_data)
2889
      return job_data
2890

    
2891
    # no job found
2892
    return self.jobs.pop(0)
2893

    
2894
  def GetResults(self):
2895
    """Wait for and return the results of all jobs.
2896

2897
    @rtype: list
2898
    @return: list of tuples (success, job results), in the same order
2899
        as the submitted jobs; if a job has failed, instead of the result
2900
        there will be the error message
2901

2902
    """
2903
    if not self.jobs:
2904
      self.SubmitPending()
2905
    results = []
2906
    if self.verbose:
2907
      ok_jobs = [row[2] for row in self.jobs if row[1]]
2908
      if ok_jobs:
2909
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2910

    
2911
    # first, remove any non-submitted jobs
2912
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2913
    for idx, _, jid, name in failures:
2914
      ToStderr("Failed to submit job for %s: %s", name, jid)
2915
      results.append((idx, False, jid))
2916

    
2917
    while self.jobs:
2918
      (idx, _, jid, name) = self._ChooseJob()
2919
      ToStdout("Waiting for job %s for %s...", jid, name)
2920
      try:
2921
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2922
        success = True
2923
      except errors.JobLost, err:
2924
        _, job_result = FormatError(err)
2925
        ToStderr("Job %s for %s has been archived, cannot check its result",
2926
                 jid, name)
2927
        success = False
2928
      except (errors.GenericError, luxi.ProtocolError), err:
2929
        _, job_result = FormatError(err)
2930
        success = False
2931
        # the error message will always be shown, verbose or not
2932
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2933

    
2934
      results.append((idx, success, job_result))
2935

    
2936
    # sort based on the index, then drop it
2937
    results.sort()
2938
    results = [i[1:] for i in results]
2939

    
2940
    return results
2941

    
2942
  def WaitOrShow(self, wait):
2943
    """Wait for job results or only print the job IDs.
2944

2945
    @type wait: boolean
2946
    @param wait: whether to wait or not
2947

2948
    """
2949
    if wait:
2950
      return self.GetResults()
2951
    else:
2952
      if not self.jobs:
2953
        self.SubmitPending()
2954
      for _, status, result, name in self.jobs:
2955
        if status:
2956
          ToStdout("%s: %s", result, name)
2957
        else:
2958
          ToStderr("Failure for %s: %s", name, result)
2959
      return [row[1:3] for row in self.jobs]
2960

    
2961

    
2962
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the object's own (explicitly set) parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))


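# Illustrative usage (not part of the original module): values missing from
# the object's own dictionary are shown as coming from the defaults.
#
#   buf = StringIO()
#   FormatParameterDict(buf, {"memory": 512}, {"memory": 512, "vcpus": 1})
#   buf.getvalue() => "  - memory: 512\n  - vcpus: default (1)\n"

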
def ConfirmOperation(names, list_type, text, extra=""):
2981
  """Ask the user to confirm an operation on a list of list_type.
2982

2983
  This function is used to request confirmation for doing an operation
2984
  on a given list of list_type.
2985

2986
  @type names: list
2987
  @param names: the list of names that we display when
2988
      we ask for confirmation
2989
  @type list_type: str
2990
  @param list_type: Human readable name for elements in the list (e.g. nodes)
2991
  @type text: str
2992
  @param text: the operation that the user should confirm
2993
  @rtype: boolean
2994
  @return: True or False depending on user's confirmation.
2995

2996
  """
2997
  count = len(names)
2998
  msg = ("The %s will operate on %d %s.\n%s"
2999
         "Do you want to continue?" % (text, count, list_type, extra))
3000
  affected = (("\nAffected %s:\n" % list_type) +
3001
              "\n".join(["  %s" % name for name in names]))
3002

    
3003
  choices = [("y", True, "Yes, execute the %s" % text),
3004
             ("n", False, "No, abort the %s" % text)]
3005

    
3006
  if count > 20:
3007
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3008
    question = msg
3009
  else:
3010
    question = msg + affected
3011

    
3012
  choice = AskUser(question, choices)
3013
  if choice == "v":
3014
    choices.pop(1)
3015
    choice = AskUser(msg + affected, choices)
3016
  return choice