Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ 6b9b18a2

History | View | Annotate | Download (96.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
from cStringIO import StringIO
31

    
32
from ganeti import utils
33
from ganeti import errors
34
from ganeti import constants
35
from ganeti import opcodes
36
from ganeti import luxi
37
from ganeti import ssconf
38
from ganeti import rpc
39
from ganeti import ssh
40
from ganeti import compat
41
from ganeti import netutils
42
from ganeti import qlang
43

    
44
from optparse import (OptionParser, TitledHelpFormatter,
45
                      Option, OptionValueError)
46

    
47

    
48
__all__ = [
49
  # Command line options
50
  "ADD_UIDS_OPT",
51
  "ALLOCATABLE_OPT",
52
  "ALLOC_POLICY_OPT",
53
  "ALL_OPT",
54
  "AUTO_PROMOTE_OPT",
55
  "AUTO_REPLACE_OPT",
56
  "BACKEND_OPT",
57
  "BLK_OS_OPT",
58
  "CAPAB_MASTER_OPT",
59
  "CAPAB_VM_OPT",
60
  "CLEANUP_OPT",
61
  "CLUSTER_DOMAIN_SECRET_OPT",
62
  "CONFIRM_OPT",
63
  "CP_SIZE_OPT",
64
  "DEBUG_OPT",
65
  "DEBUG_SIMERR_OPT",
66
  "DISKIDX_OPT",
67
  "DISK_OPT",
68
  "DISK_TEMPLATE_OPT",
69
  "DRAINED_OPT",
70
  "DRY_RUN_OPT",
71
  "DRBD_HELPER_OPT",
72
  "EARLY_RELEASE_OPT",
73
  "ENABLED_HV_OPT",
74
  "ERROR_CODES_OPT",
75
  "FIELDS_OPT",
76
  "FILESTORE_DIR_OPT",
77
  "FILESTORE_DRIVER_OPT",
78
  "FORCE_OPT",
79
  "FORCE_VARIANT_OPT",
80
  "GLOBAL_FILEDIR_OPT",
81
  "HID_OS_OPT",
82
  "HVLIST_OPT",
83
  "HVOPTS_OPT",
84
  "HYPERVISOR_OPT",
85
  "IALLOCATOR_OPT",
86
  "DEFAULT_IALLOCATOR_OPT",
87
  "IDENTIFY_DEFAULTS_OPT",
88
  "IGNORE_CONSIST_OPT",
89
  "IGNORE_FAILURES_OPT",
90
  "IGNORE_OFFLINE_OPT",
91
  "IGNORE_REMOVE_FAILURES_OPT",
92
  "IGNORE_SECONDARIES_OPT",
93
  "IGNORE_SIZE_OPT",
94
  "INTERVAL_OPT",
95
  "MAC_PREFIX_OPT",
96
  "MAINTAIN_NODE_HEALTH_OPT",
97
  "MASTER_NETDEV_OPT",
98
  "MC_OPT",
99
  "MIGRATION_MODE_OPT",
100
  "NET_OPT",
101
  "NEW_CLUSTER_CERT_OPT",
102
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
103
  "NEW_CONFD_HMAC_KEY_OPT",
104
  "NEW_RAPI_CERT_OPT",
105
  "NEW_SECONDARY_OPT",
106
  "NIC_PARAMS_OPT",
107
  "NODE_FORCE_JOIN_OPT",
108
  "NODE_LIST_OPT",
109
  "NODE_PLACEMENT_OPT",
110
  "NODEGROUP_OPT",
111
  "NODE_PARAMS_OPT",
112
  "NODE_POWERED_OPT",
113
  "NODRBD_STORAGE_OPT",
114
  "NOHDR_OPT",
115
  "NOIPCHECK_OPT",
116
  "NO_INSTALL_OPT",
117
  "NONAMECHECK_OPT",
118
  "NOLVM_STORAGE_OPT",
119
  "NOMODIFY_ETCHOSTS_OPT",
120
  "NOMODIFY_SSH_SETUP_OPT",
121
  "NONICS_OPT",
122
  "NONLIVE_OPT",
123
  "NONPLUS1_OPT",
124
  "NOSHUTDOWN_OPT",
125
  "NOSTART_OPT",
126
  "NOSSH_KEYCHECK_OPT",
127
  "NOVOTING_OPT",
128
  "NWSYNC_OPT",
129
  "ON_PRIMARY_OPT",
130
  "ON_SECONDARY_OPT",
131
  "OFFLINE_OPT",
132
  "OSPARAMS_OPT",
133
  "OS_OPT",
134
  "OS_SIZE_OPT",
135
  "PREALLOC_WIPE_DISKS_OPT",
136
  "PRIMARY_IP_VERSION_OPT",
137
  "PRIORITY_OPT",
138
  "RAPI_CERT_OPT",
139
  "READD_OPT",
140
  "REBOOT_TYPE_OPT",
141
  "REMOVE_INSTANCE_OPT",
142
  "REMOVE_UIDS_OPT",
143
  "RESERVED_LVS_OPT",
144
  "ROMAN_OPT",
145
  "SECONDARY_IP_OPT",
146
  "SELECT_OS_OPT",
147
  "SEP_OPT",
148
  "SHOWCMD_OPT",
149
  "SHUTDOWN_TIMEOUT_OPT",
150
  "SINGLE_NODE_OPT",
151
  "SRC_DIR_OPT",
152
  "SRC_NODE_OPT",
153
  "SUBMIT_OPT",
154
  "STATIC_OPT",
155
  "SYNC_OPT",
156
  "TAG_SRC_OPT",
157
  "TIMEOUT_OPT",
158
  "UIDPOOL_OPT",
159
  "USEUNITS_OPT",
160
  "USE_REPL_NET_OPT",
161
  "VERBOSE_OPT",
162
  "VG_NAME_OPT",
163
  "YES_DOIT_OPT",
164
  # Generic functions for CLI programs
165
  "ConfirmOperation",
166
  "GenericMain",
167
  "GenericInstanceCreate",
168
  "GenericList",
169
  "GenericListFields",
170
  "GetClient",
171
  "GetOnlineNodes",
172
  "JobExecutor",
173
  "JobSubmittedException",
174
  "ParseTimespec",
175
  "RunWhileClusterStopped",
176
  "SubmitOpCode",
177
  "SubmitOrSend",
178
  "UsesRPC",
179
  # Formatting functions
180
  "ToStderr", "ToStdout",
181
  "FormatError",
182
  "FormatQueryResult",
183
  "FormatParameterDict",
184
  "GenerateTable",
185
  "AskUser",
186
  "FormatTimestamp",
187
  "FormatLogMessage",
188
  # Tags functions
189
  "ListTags",
190
  "AddTags",
191
  "RemoveTags",
192
  # command line options support infrastructure
193
  "ARGS_MANY_INSTANCES",
194
  "ARGS_MANY_NODES",
195
  "ARGS_MANY_GROUPS",
196
  "ARGS_NONE",
197
  "ARGS_ONE_INSTANCE",
198
  "ARGS_ONE_NODE",
199
  "ARGS_ONE_GROUP",
200
  "ARGS_ONE_OS",
201
  "ArgChoice",
202
  "ArgCommand",
203
  "ArgFile",
204
  "ArgGroup",
205
  "ArgHost",
206
  "ArgInstance",
207
  "ArgJobId",
208
  "ArgNode",
209
  "ArgOs",
210
  "ArgSuggest",
211
  "ArgUnknown",
212
  "OPT_COMPL_INST_ADD_NODES",
213
  "OPT_COMPL_MANY_NODES",
214
  "OPT_COMPL_ONE_IALLOCATOR",
215
  "OPT_COMPL_ONE_INSTANCE",
216
  "OPT_COMPL_ONE_NODE",
217
  "OPT_COMPL_ONE_NODEGROUP",
218
  "OPT_COMPL_ONE_OS",
219
  "cli_option",
220
  "SplitNodeOption",
221
  "CalculateOSNames",
222
  "ParseFields",
223
  "COMMON_CREATE_OPTS",
224
  ]
225

    
226
NO_PREFIX = "no_"
227
UN_PREFIX = "-"
228

    
229
#: Priorities (sorted)
230
_PRIORITY_NAMES = [
231
  ("low", constants.OP_PRIO_LOW),
232
  ("normal", constants.OP_PRIO_NORMAL),
233
  ("high", constants.OP_PRIO_HIGH),
234
  ]
235

    
236
#: Priority dictionary for easier lookup
237
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
238
# we migrate to Python 2.6
239
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
240

    
241
# Query result status for clients
242
(QR_NORMAL,
243
 QR_UNKNOWN,
244
 QR_INCOMPLETE) = range(3)
245

    
246

    
247
class _Argument:
248
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
249
    self.min = min
250
    self.max = max
251

    
252
  def __repr__(self):
253
    return ("<%s min=%s max=%s>" %
254
            (self.__class__.__name__, self.min, self.max))
255

    
256

    
257
class ArgSuggest(_Argument):
258
  """Suggesting argument.
259

260
  Value can be any of the ones passed to the constructor.
261

262
  """
263
  # pylint: disable-msg=W0622
264
  def __init__(self, min=0, max=None, choices=None):
265
    _Argument.__init__(self, min=min, max=max)
266
    self.choices = choices
267

    
268
  def __repr__(self):
269
    return ("<%s min=%s max=%s choices=%r>" %
270
            (self.__class__.__name__, self.min, self.max, self.choices))
271

    
272

    
273
class ArgChoice(ArgSuggest):
274
  """Choice argument.
275

276
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
277
  but value must be one of the choices.
278

279
  """
280

    
281

    
282
class ArgUnknown(_Argument):
283
  """Unknown argument to program (e.g. determined at runtime).
284

285
  """
286

    
287

    
288
class ArgInstance(_Argument):
289
  """Instances argument.
290

291
  """
292

    
293

    
294
class ArgNode(_Argument):
295
  """Node argument.
296

297
  """
298

    
299

    
300
class ArgGroup(_Argument):
301
  """Node group argument.
302

303
  """
304

    
305

    
306
class ArgJobId(_Argument):
307
  """Job ID argument.
308

309
  """
310

    
311

    
312
class ArgFile(_Argument):
313
  """File path argument.
314

315
  """
316

    
317

    
318
class ArgCommand(_Argument):
319
  """Command argument.
320

321
  """
322

    
323

    
324
class ArgHost(_Argument):
325
  """Host argument.
326

327
  """
328

    
329

    
330
class ArgOs(_Argument):
331
  """OS argument.
332

333
  """
334

    
335

    
336
ARGS_NONE = []
337
ARGS_MANY_INSTANCES = [ArgInstance()]
338
ARGS_MANY_NODES = [ArgNode()]
339
ARGS_MANY_GROUPS = [ArgGroup()]
340
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
341
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
342
ARGS_ONE_GROUP = [ArgInstance(min=1, max=1)]
343
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
344

    
345

    
346
def _ExtractTagsObject(opts, args):
347
  """Extract the tag type object.
348

349
  Note that this function will modify its args parameter.
350

351
  """
352
  if not hasattr(opts, "tag_type"):
353
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
354
  kind = opts.tag_type
355
  if kind == constants.TAG_CLUSTER:
356
    retval = kind, kind
357
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
358
    if not args:
359
      raise errors.OpPrereqError("no arguments passed to the command")
360
    name = args.pop(0)
361
    retval = kind, name
362
  else:
363
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
364
  return retval
365

    
366

    
367
def _ExtendTags(opts, args):
368
  """Extend the args if a source file has been given.
369

370
  This function will extend the tags with the contents of the file
371
  passed in the 'tags_source' attribute of the opts parameter. A file
372
  named '-' will be replaced by stdin.
373

374
  """
375
  fname = opts.tags_source
376
  if fname is None:
377
    return
378
  if fname == "-":
379
    new_fh = sys.stdin
380
  else:
381
    new_fh = open(fname, "r")
382
  new_data = []
383
  try:
384
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
385
    # because of python bug 1633941
386
    while True:
387
      line = new_fh.readline()
388
      if not line:
389
        break
390
      new_data.append(line.strip())
391
  finally:
392
    new_fh.close()
393
  args.extend(new_data)
394

    
395

    
396
def ListTags(opts, args):
397
  """List the tags on a given object.
398

399
  This is a generic implementation that knows how to deal with all
400
  three cases of tag objects (cluster, node, instance). The opts
401
  argument is expected to contain a tag_type field denoting what
402
  object type we work on.
403

404
  """
405
  kind, name = _ExtractTagsObject(opts, args)
406
  cl = GetClient()
407
  result = cl.QueryTags(kind, name)
408
  result = list(result)
409
  result.sort()
410
  for tag in result:
411
    ToStdout(tag)
412

    
413

    
414
def AddTags(opts, args):
415
  """Add tags on a given object.
416

417
  This is a generic implementation that knows how to deal with all
418
  three cases of tag objects (cluster, node, instance). The opts
419
  argument is expected to contain a tag_type field denoting what
420
  object type we work on.
421

422
  """
423
  kind, name = _ExtractTagsObject(opts, args)
424
  _ExtendTags(opts, args)
425
  if not args:
426
    raise errors.OpPrereqError("No tags to be added")
427
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
428
  SubmitOpCode(op, opts=opts)
429

    
430

    
431
def RemoveTags(opts, args):
432
  """Remove tags from a given object.
433

434
  This is a generic implementation that knows how to deal with all
435
  three cases of tag objects (cluster, node, instance). The opts
436
  argument is expected to contain a tag_type field denoting what
437
  object type we work on.
438

439
  """
440
  kind, name = _ExtractTagsObject(opts, args)
441
  _ExtendTags(opts, args)
442
  if not args:
443
    raise errors.OpPrereqError("No tags to be removed")
444
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
445
  SubmitOpCode(op, opts=opts)
446

    
447

    
448
def check_unit(option, opt, value): # pylint: disable-msg=W0613
449
  """OptParsers custom converter for units.
450

451
  """
452
  try:
453
    return utils.ParseUnit(value)
454
  except errors.UnitParseError, err:
455
    raise OptionValueError("option %s: %s" % (opt, err))
456

    
457

    
458
def _SplitKeyVal(opt, data):
459
  """Convert a KeyVal string into a dict.
460

461
  This function will convert a key=val[,...] string into a dict. Empty
462
  values will be converted specially: keys which have the prefix 'no_'
463
  will have the value=False and the prefix stripped, the others will
464
  have value=True.
465

466
  @type opt: string
467
  @param opt: a string holding the option name for which we process the
468
      data, used in building error messages
469
  @type data: string
470
  @param data: a string of the format key=val,key=val,...
471
  @rtype: dict
472
  @return: {key=val, key=val}
473
  @raises errors.ParameterError: if there are duplicate keys
474

475
  """
476
  kv_dict = {}
477
  if data:
478
    for elem in utils.UnescapeAndSplit(data, sep=","):
479
      if "=" in elem:
480
        key, val = elem.split("=", 1)
481
      else:
482
        if elem.startswith(NO_PREFIX):
483
          key, val = elem[len(NO_PREFIX):], False
484
        elif elem.startswith(UN_PREFIX):
485
          key, val = elem[len(UN_PREFIX):], None
486
        else:
487
          key, val = elem, True
488
      if key in kv_dict:
489
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
490
                                    (key, opt))
491
      kv_dict[key] = val
492
  return kv_dict
493

    
494

    
495
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
496
  """Custom parser for ident:key=val,key=val options.
497

498
  This will store the parsed values as a tuple (ident, {key: val}). As such,
499
  multiple uses of this option via action=append is possible.
500

501
  """
502
  if ":" not in value:
503
    ident, rest = value, ''
504
  else:
505
    ident, rest = value.split(":", 1)
506

    
507
  if ident.startswith(NO_PREFIX):
508
    if rest:
509
      msg = "Cannot pass options when removing parameter groups: %s" % value
510
      raise errors.ParameterError(msg)
511
    retval = (ident[len(NO_PREFIX):], False)
512
  elif ident.startswith(UN_PREFIX):
513
    if rest:
514
      msg = "Cannot pass options when removing parameter groups: %s" % value
515
      raise errors.ParameterError(msg)
516
    retval = (ident[len(UN_PREFIX):], None)
517
  else:
518
    kv_dict = _SplitKeyVal(opt, rest)
519
    retval = (ident, kv_dict)
520
  return retval
521

    
522

    
523
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
524
  """Custom parser class for key=val,key=val options.
525

526
  This will store the parsed values as a dict {key: val}.
527

528
  """
529
  return _SplitKeyVal(opt, value)
530

    
531

    
532
def check_bool(option, opt, value): # pylint: disable-msg=W0613
533
  """Custom parser for yes/no options.
534

535
  This will store the parsed value as either True or False.
536

537
  """
538
  value = value.lower()
539
  if value == constants.VALUE_FALSE or value == "no":
540
    return False
541
  elif value == constants.VALUE_TRUE or value == "yes":
542
    return True
543
  else:
544
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
545

    
546

    
547
# completion_suggestion is normally a list. Using numeric values not evaluating
548
# to False for dynamic completion.
549
(OPT_COMPL_MANY_NODES,
550
 OPT_COMPL_ONE_NODE,
551
 OPT_COMPL_ONE_INSTANCE,
552
 OPT_COMPL_ONE_OS,
553
 OPT_COMPL_ONE_IALLOCATOR,
554
 OPT_COMPL_INST_ADD_NODES,
555
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
556

    
557
OPT_COMPL_ALL = frozenset([
558
  OPT_COMPL_MANY_NODES,
559
  OPT_COMPL_ONE_NODE,
560
  OPT_COMPL_ONE_INSTANCE,
561
  OPT_COMPL_ONE_OS,
562
  OPT_COMPL_ONE_IALLOCATOR,
563
  OPT_COMPL_INST_ADD_NODES,
564
  OPT_COMPL_ONE_NODEGROUP,
565
  ])
566

    
567

    
568
class CliOption(Option):
569
  """Custom option class for optparse.
570

571
  """
572
  ATTRS = Option.ATTRS + [
573
    "completion_suggest",
574
    ]
575
  TYPES = Option.TYPES + (
576
    "identkeyval",
577
    "keyval",
578
    "unit",
579
    "bool",
580
    )
581
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
582
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
583
  TYPE_CHECKER["keyval"] = check_key_val
584
  TYPE_CHECKER["unit"] = check_unit
585
  TYPE_CHECKER["bool"] = check_bool
586

    
587

    
588
# optparse.py sets make_option, so we do it for our own option class, too
589
cli_option = CliOption
590

    
591

    
592
_YORNO = "yes|no"
593

    
594
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
595
                       help="Increase debugging level")
596

    
597
NOHDR_OPT = cli_option("--no-headers", default=False,
598
                       action="store_true", dest="no_headers",
599
                       help="Don't display column headers")
600

    
601
SEP_OPT = cli_option("--separator", default=None,
602
                     action="store", dest="separator",
603
                     help=("Separator between output fields"
604
                           " (defaults to one space)"))
605

    
606
USEUNITS_OPT = cli_option("--units", default=None,
607
                          dest="units", choices=('h', 'm', 'g', 't'),
608
                          help="Specify units for output (one of h/m/g/t)")
609

    
610
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
611
                        type="string", metavar="FIELDS",
612
                        help="Comma separated list of output fields")
613

    
614
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
615
                       default=False, help="Force the operation")
616

    
617
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
618
                         default=False, help="Do not require confirmation")
619

    
620
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
621
                                  action="store_true", default=False,
622
                                  help=("Ignore offline nodes and do as much"
623
                                        " as possible"))
624

    
625
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
626
                         default=None, help="File with tag names")
627

    
628
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
629
                        default=False, action="store_true",
630
                        help=("Submit the job and return the job ID, but"
631
                              " don't wait for the job to finish"))
632

    
633
SYNC_OPT = cli_option("--sync", dest="do_locking",
634
                      default=False, action="store_true",
635
                      help=("Grab locks while doing the queries"
636
                            " in order to ensure more consistent results"))
637

    
638
DRY_RUN_OPT = cli_option("--dry-run", default=False,
639
                         action="store_true",
640
                         help=("Do not execute the operation, just run the"
641
                               " check steps and verify it it could be"
642
                               " executed"))
643

    
644
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
645
                         action="store_true",
646
                         help="Increase the verbosity of the operation")
647

    
648
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
649
                              action="store_true", dest="simulate_errors",
650
                              help="Debugging option that makes the operation"
651
                              " treat most runtime checks as failed")
652

    
653
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
654
                        default=True, action="store_false",
655
                        help="Don't wait for sync (DANGEROUS!)")
656

    
657
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
658
                               help="Custom disk setup (diskless, file,"
659
                               " plain or drbd)",
660
                               default=None, metavar="TEMPL",
661
                               choices=list(constants.DISK_TEMPLATES))
662

    
663
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
664
                        help="Do not create any network cards for"
665
                        " the instance")
666

    
667
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
668
                               help="Relative path under default cluster-wide"
669
                               " file storage dir to store file-based disks",
670
                               default=None, metavar="<DIR>")
671

    
672
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
673
                                  help="Driver to use for image files",
674
                                  default="loop", metavar="<DRIVER>",
675
                                  choices=list(constants.FILE_DRIVER))
676

    
677
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
678
                            help="Select nodes for the instance automatically"
679
                            " using the <NAME> iallocator plugin",
680
                            default=None, type="string",
681
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
682

    
683
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
684
                            metavar="<NAME>",
685
                            help="Set the default instance allocator plugin",
686
                            default=None, type="string",
687
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
688

    
689
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
690
                    metavar="<os>",
691
                    completion_suggest=OPT_COMPL_ONE_OS)
692

    
693
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
694
                         type="keyval", default={},
695
                         help="OS parameters")
696

    
697
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
698
                               action="store_true", default=False,
699
                               help="Force an unknown variant")
700

    
701
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
702
                            action="store_true", default=False,
703
                            help="Do not install the OS (will"
704
                            " enable no-start)")
705

    
706
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
707
                         type="keyval", default={},
708
                         help="Backend parameters")
709

    
710
HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
711
                         default={}, dest="hvparams",
712
                         help="Hypervisor parameters")
713

    
714
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
715
                            help="Hypervisor and hypervisor options, in the"
716
                            " format hypervisor:option=value,option=value,...",
717
                            default=None, type="identkeyval")
718

    
719
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
720
                        help="Hypervisor and hypervisor options, in the"
721
                        " format hypervisor:option=value,option=value,...",
722
                        default=[], action="append", type="identkeyval")
723

    
724
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
725
                           action="store_false",
726
                           help="Don't check that the instance's IP"
727
                           " is alive")
728

    
729
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
730
                             default=True, action="store_false",
731
                             help="Don't check that the instance's name"
732
                             " is resolvable")
733

    
734
NET_OPT = cli_option("--net",
735
                     help="NIC parameters", default=[],
736
                     dest="nics", action="append", type="identkeyval")
737

    
738
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
739
                      dest="disks", action="append", type="identkeyval")
740

    
741
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
742
                         help="Comma-separated list of disks"
743
                         " indices to act on (e.g. 0,2) (optional,"
744
                         " defaults to all disks)")
745

    
746
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
747
                         help="Enforces a single-disk configuration using the"
748
                         " given disk size, in MiB unless a suffix is used",
749
                         default=None, type="unit", metavar="<size>")
750

    
751
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
752
                                dest="ignore_consistency",
753
                                action="store_true", default=False,
754
                                help="Ignore the consistency of the disks on"
755
                                " the secondary")
756

    
757
NONLIVE_OPT = cli_option("--non-live", dest="live",
758
                         default=True, action="store_false",
759
                         help="Do a non-live migration (this usually means"
760
                         " freeze the instance, save the state, transfer and"
761
                         " only then resume running on the secondary node)")
762

    
763
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
764
                                default=None,
765
                                choices=list(constants.HT_MIGRATION_MODES),
766
                                help="Override default migration mode (choose"
767
                                " either live or non-live")
768

    
769
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
770
                                help="Target node and optional secondary node",
771
                                metavar="<pnode>[:<snode>]",
772
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
773

    
774
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
775
                           action="append", metavar="<node>",
776
                           help="Use only this node (can be used multiple"
777
                           " times, if not given defaults to all nodes)",
778
                           completion_suggest=OPT_COMPL_ONE_NODE)
779

    
780
NODEGROUP_OPT = cli_option("-g", "--node-group",
781
                           dest="nodegroup",
782
                           help="Node group (name or uuid)",
783
                           metavar="<nodegroup>",
784
                           default=None, type="string",
785
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
786

    
787
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
788
                             metavar="<node>",
789
                             completion_suggest=OPT_COMPL_ONE_NODE)
790

    
791
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
792
                         action="store_false",
793
                         help="Don't start the instance after creation")
794

    
795
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
796
                         action="store_true", default=False,
797
                         help="Show command instead of executing it")
798

    
799
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
800
                         default=False, action="store_true",
801
                         help="Instead of performing the migration, try to"
802
                         " recover from a failed cleanup. This is safe"
803
                         " to run even if the instance is healthy, but it"
804
                         " will create extra replication traffic and "
805
                         " disrupt briefly the replication (like during the"
806
                         " migration")
807

    
808
STATIC_OPT = cli_option("-s", "--static", dest="static",
809
                        action="store_true", default=False,
810
                        help="Only show configuration data, not runtime data")
811

    
812
ALL_OPT = cli_option("--all", dest="show_all",
813
                     default=False, action="store_true",
814
                     help="Show info on all instances on the cluster."
815
                     " This can take a long time to run, use wisely")
816

    
817
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
818
                           action="store_true", default=False,
819
                           help="Interactive OS reinstall, lists available"
820
                           " OS templates for selection")
821

    
822
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
823
                                 action="store_true", default=False,
824
                                 help="Remove the instance from the cluster"
825
                                 " configuration even if there are failures"
826
                                 " during the removal process")
827

    
828
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
829
                                        dest="ignore_remove_failures",
830
                                        action="store_true", default=False,
831
                                        help="Remove the instance from the"
832
                                        " cluster configuration even if there"
833
                                        " are failures during the removal"
834
                                        " process")
835

    
836
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
837
                                 action="store_true", default=False,
838
                                 help="Remove the instance from the cluster")
839

    
840
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
841
                               help="Specifies the new secondary node",
842
                               metavar="NODE", default=None,
843
                               completion_suggest=OPT_COMPL_ONE_NODE)
844

    
845
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
846
                            default=False, action="store_true",
847
                            help="Replace the disk(s) on the primary"
848
                            " node (only for the drbd template)")
849

    
850
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
851
                              default=False, action="store_true",
852
                              help="Replace the disk(s) on the secondary"
853
                              " node (only for the drbd template)")
854

    
855
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
856
                              default=False, action="store_true",
857
                              help="Lock all nodes and auto-promote as needed"
858
                              " to MC status")
859

    
860
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
861
                              default=False, action="store_true",
862
                              help="Automatically replace faulty disks"
863
                              " (only for the drbd template)")
864

    
865
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
866
                             default=False, action="store_true",
867
                             help="Ignore current recorded size"
868
                             " (useful for forcing activation when"
869
                             " the recorded size is wrong)")
870

    
871
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
872
                          metavar="<node>",
873
                          completion_suggest=OPT_COMPL_ONE_NODE)
874

    
875
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
876
                         metavar="<dir>")
877

    
878
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
879
                              help="Specify the secondary ip for the node",
880
                              metavar="ADDRESS", default=None)
881

    
882
READD_OPT = cli_option("--readd", dest="readd",
883
                       default=False, action="store_true",
884
                       help="Readd old node after replacing it")
885

    
886
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
887
                                default=True, action="store_false",
888
                                help="Disable SSH key fingerprint checking")
889

    
890
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
891
                                 default=False, action="store_true",
892
                                 help="Force the joining of a node,"
893
                                      " needed when merging clusters")
894

    
895
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
896
                    type="bool", default=None, metavar=_YORNO,
897
                    help="Set the master_candidate flag on the node")
898

    
899
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
900
                         type="bool", default=None,
901
                         help=("Set the offline flag on the node"
902
                               " (cluster does not communicate with offline"
903
                               " nodes)"))
904

    
905
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
906
                         type="bool", default=None,
907
                         help=("Set the drained flag on the node"
908
                               " (excluded from allocation operations)"))
909

    
910
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
911
                    type="bool", default=None, metavar=_YORNO,
912
                    help="Set the master_capable flag on the node")
913

    
914
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
915
                    type="bool", default=None, metavar=_YORNO,
916
                    help="Set the vm_capable flag on the node")
917

    
918
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
919
                             type="bool", default=None, metavar=_YORNO,
920
                             help="Set the allocatable flag on a volume")
921

    
922
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
923
                               help="Disable support for lvm based instances"
924
                               " (cluster-wide)",
925
                               action="store_false", default=True)
926

    
927
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
928
                            dest="enabled_hypervisors",
929
                            help="Comma-separated list of hypervisors",
930
                            type="string", default=None)
931

    
932
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
933
                            type="keyval", default={},
934
                            help="NIC parameters")
935

    
936
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
937
                         dest="candidate_pool_size", type="int",
938
                         help="Set the candidate pool size")
939

    
940
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
941
                         help=("Enables LVM and specifies the volume group"
942
                               " name (cluster-wide) for disk allocation"
943
                               " [%s]" % constants.DEFAULT_VG),
944
                         metavar="VG", default=None)
945

    
946
YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
947
                          help="Destroy cluster", action="store_true")
948

    
949
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
950
                          help="Skip node agreement check (dangerous)",
951
                          action="store_true", default=False)
952

    
953
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
954
                            help="Specify the mac prefix for the instance IP"
955
                            " addresses, in the format XX:XX:XX",
956
                            metavar="PREFIX",
957
                            default=None)
958

    
959
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
960
                               help="Specify the node interface (cluster-wide)"
961
                               " on which the master IP address will be added"
962
                               " (cluster init default: %s)" %
963
                               constants.DEFAULT_BRIDGE,
964
                               metavar="NETDEV",
965
                               default=None)
966

    
967
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
968
                                help="Specify the default directory (cluster-"
969
                                "wide) for storing the file-based disks [%s]" %
970
                                constants.DEFAULT_FILE_STORAGE_DIR,
971
                                metavar="DIR",
972
                                default=constants.DEFAULT_FILE_STORAGE_DIR)
973

    
974
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
975
                                   help="Don't modify /etc/hosts",
976
                                   action="store_false", default=True)
977

    
978
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
979
                                    help="Don't initialize SSH keys",
980
                                    action="store_false", default=True)
981

    
982
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
983
                             help="Enable parseable error messages",
984
                             action="store_true", default=False)
985

    
986
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
987
                          help="Skip N+1 memory redundancy tests",
988
                          action="store_true", default=False)
989

    
990
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
991
                             help="Type of reboot: soft/hard/full",
992
                             default=constants.INSTANCE_REBOOT_HARD,
993
                             metavar="<REBOOT>",
994
                             choices=list(constants.REBOOT_TYPES))
995

    
996
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
997
                                    dest="ignore_secondaries",
998
                                    default=False, action="store_true",
999
                                    help="Ignore errors from secondaries")
1000

    
1001
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1002
                            action="store_false", default=True,
1003
                            help="Don't shutdown the instance (unsafe)")
1004

    
1005
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1006
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1007
                         help="Maximum time to wait")
1008

    
1009
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1010
                         dest="shutdown_timeout", type="int",
1011
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1012
                         help="Maximum time to wait for instance shutdown")
1013

    
1014
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1015
                          default=None,
1016
                          help=("Number of seconds between repetions of the"
1017
                                " command"))
1018

    
1019
EARLY_RELEASE_OPT = cli_option("--early-release",
1020
                               dest="early_release", default=False,
1021
                               action="store_true",
1022
                               help="Release the locks on the secondary"
1023
                               " node(s) early")
1024

    
1025
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1026
                                  dest="new_cluster_cert",
1027
                                  default=False, action="store_true",
1028
                                  help="Generate a new cluster certificate")
1029

    
1030
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1031
                           default=None,
1032
                           help="File containing new RAPI certificate")
1033

    
1034
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1035
                               default=None, action="store_true",
1036
                               help=("Generate a new self-signed RAPI"
1037
                                     " certificate"))
1038

    
1039
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1040
                                    dest="new_confd_hmac_key",
1041
                                    default=False, action="store_true",
1042
                                    help=("Create a new HMAC key for %s" %
1043
                                          constants.CONFD))
1044

    
1045
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1046
                                       dest="cluster_domain_secret",
1047
                                       default=None,
1048
                                       help=("Load new new cluster domain"
1049
                                             " secret from file"))
1050

    
1051
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1052
                                           dest="new_cluster_domain_secret",
1053
                                           default=False, action="store_true",
1054
                                           help=("Create a new cluster domain"
1055
                                                 " secret"))
1056

    
1057
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1058
                              dest="use_replication_network",
1059
                              help="Whether to use the replication network"
1060
                              " for talking to the nodes",
1061
                              action="store_true", default=False)
1062

    
1063
MAINTAIN_NODE_HEALTH_OPT = \
1064
    cli_option("--maintain-node-health", dest="maintain_node_health",
1065
               metavar=_YORNO, default=None, type="bool",
1066
               help="Configure the cluster to automatically maintain node"
1067
               " health, by shutting down unknown instances, shutting down"
1068
               " unknown DRBD devices, etc.")
1069

    
1070
IDENTIFY_DEFAULTS_OPT = \
1071
    cli_option("--identify-defaults", dest="identify_defaults",
1072
               default=False, action="store_true",
1073
               help="Identify which saved instance parameters are equal to"
1074
               " the current cluster defaults and set them as such, instead"
1075
               " of marking them as overridden")
1076

    
1077
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1078
                         action="store", dest="uid_pool",
1079
                         help=("A list of user-ids or user-id"
1080
                               " ranges separated by commas"))
1081

    
1082
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1083
                          action="store", dest="add_uids",
1084
                          help=("A list of user-ids or user-id"
1085
                                " ranges separated by commas, to be"
1086
                                " added to the user-id pool"))
1087

    
1088
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1089
                             action="store", dest="remove_uids",
1090
                             help=("A list of user-ids or user-id"
1091
                                   " ranges separated by commas, to be"
1092
                                   " removed from the user-id pool"))
1093

    
1094
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1095
                             action="store", dest="reserved_lvs",
1096
                             help=("A comma-separated list of reserved"
1097
                                   " logical volumes names, that will be"
1098
                                   " ignored by cluster verify"))
1099

    
1100
ROMAN_OPT = cli_option("--roman",
1101
                       dest="roman_integers", default=False,
1102
                       action="store_true",
1103
                       help="Use roman numbers for positive integers")
1104

    
1105
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1106
                             action="store", default=None,
1107
                             help="Specifies usermode helper for DRBD")
1108

    
1109
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1110
                                action="store_false", default=True,
1111
                                help="Disable support for DRBD")
1112

    
1113
PRIMARY_IP_VERSION_OPT = \
1114
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1115
               action="store", dest="primary_ip_version",
1116
               metavar="%d|%d" % (constants.IP4_VERSION,
1117
                                  constants.IP6_VERSION),
1118
               help="Cluster-wide IP version for primary IP")
1119

    
1120
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1121
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1122
                          choices=_PRIONAME_TO_VALUE.keys(),
1123
                          help="Priority for opcode processing")
1124

    
1125
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1126
                        type="bool", default=None, metavar=_YORNO,
1127
                        help="Sets the hidden flag on the OS")
1128

    
1129
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1130
                        type="bool", default=None, metavar=_YORNO,
1131
                        help="Sets the blacklisted flag on the OS")
1132

    
1133
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1134
                                     type="bool", metavar=_YORNO,
1135
                                     dest="prealloc_wipe_disks",
1136
                                     help=("Wipe disks prior to instance"
1137
                                           " creation"))
1138

    
1139
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1140
                             type="keyval", default=None,
1141
                             help="Node parameters")
1142

    
1143
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1144
                              action="store", metavar="POLICY", default=None,
1145
                              help="Allocation policy for the node group")
1146

    
1147
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1148
                              type="bool", metavar=_YORNO,
1149
                              dest="node_powered",
1150
                              help="Specify if the SoR for node is powered")
1151

    
1152

    
1153
#: Options provided by all commands
1154
COMMON_OPTS = [DEBUG_OPT]
1155

    
1156
# common options for creating instances. add and import then add their own
1157
# specific ones.
1158
COMMON_CREATE_OPTS = [
1159
  BACKEND_OPT,
1160
  DISK_OPT,
1161
  DISK_TEMPLATE_OPT,
1162
  FILESTORE_DIR_OPT,
1163
  FILESTORE_DRIVER_OPT,
1164
  HYPERVISOR_OPT,
1165
  IALLOCATOR_OPT,
1166
  NET_OPT,
1167
  NODE_PLACEMENT_OPT,
1168
  NOIPCHECK_OPT,
1169
  NONAMECHECK_OPT,
1170
  NONICS_OPT,
1171
  NWSYNC_OPT,
1172
  OSPARAMS_OPT,
1173
  OS_SIZE_OPT,
1174
  SUBMIT_OPT,
1175
  DRY_RUN_OPT,
1176
  PRIORITY_OPT,
1177
  ]
1178

    
1179

    
1180
def _ParseArgs(argv, commands, aliases):
1181
  """Parser for the command line arguments.
1182

1183
  This function parses the arguments and returns the function which
1184
  must be executed together with its (modified) arguments.
1185

1186
  @param argv: the command line
1187
  @param commands: dictionary with special contents, see the design
1188
      doc for cmdline handling
1189
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
1190

1191
  """
1192
  if len(argv) == 0:
1193
    binary = "<command>"
1194
  else:
1195
    binary = argv[0].split("/")[-1]
1196

    
1197
  if len(argv) > 1 and argv[1] == "--version":
1198
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1199
             constants.RELEASE_VERSION)
1200
    # Quit right away. That way we don't have to care about this special
1201
    # argument. optparse.py does it the same.
1202
    sys.exit(0)
1203

    
1204
  if len(argv) < 2 or not (argv[1] in commands or
1205
                           argv[1] in aliases):
1206
    # let's do a nice thing
1207
    sortedcmds = commands.keys()
1208
    sortedcmds.sort()
1209

    
1210
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1211
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1212
    ToStdout("")
1213

    
1214
    # compute the max line length for cmd + usage
1215
    mlen = max([len(" %s" % cmd) for cmd in commands])
1216
    mlen = min(60, mlen) # should not get here...
1217

    
1218
    # and format a nice command list
1219
    ToStdout("Commands:")
1220
    for cmd in sortedcmds:
1221
      cmdstr = " %s" % (cmd,)
1222
      help_text = commands[cmd][4]
1223
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1224
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1225
      for line in help_lines:
1226
        ToStdout("%-*s   %s", mlen, "", line)
1227

    
1228
    ToStdout("")
1229

    
1230
    return None, None, None
1231

    
1232
  # get command, unalias it, and look it up in commands
1233
  cmd = argv.pop(1)
1234
  if cmd in aliases:
1235
    if cmd in commands:
1236
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
1237
                                   " command" % cmd)
1238

    
1239
    if aliases[cmd] not in commands:
1240
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1241
                                   " command '%s'" % (cmd, aliases[cmd]))
1242

    
1243
    cmd = aliases[cmd]
1244

    
1245
  func, args_def, parser_opts, usage, description = commands[cmd]
1246
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1247
                        description=description,
1248
                        formatter=TitledHelpFormatter(),
1249
                        usage="%%prog %s %s" % (cmd, usage))
1250
  parser.disable_interspersed_args()
1251
  options, args = parser.parse_args()
1252

    
1253
  if not _CheckArguments(cmd, args_def, args):
1254
    return None, None, None
1255

    
1256
  return func, options, args
1257

    
1258

    
1259
def _CheckArguments(cmd, args_def, args):
1260
  """Verifies the arguments using the argument definition.
1261

1262
  Algorithm:
1263

1264
    1. Abort with error if values specified by user but none expected.
1265

1266
    1. For each argument in definition
1267

1268
      1. Keep running count of minimum number of values (min_count)
1269
      1. Keep running count of maximum number of values (max_count)
1270
      1. If it has an unlimited number of values
1271

1272
        1. Abort with error if it's not the last argument in the definition
1273

1274
    1. If last argument has limited number of values
1275

1276
      1. Abort with error if number of values doesn't match or is too large
1277

1278
    1. Abort with error if user didn't pass enough values (min_count)
1279

1280
  """
1281
  if args and not args_def:
1282
    ToStderr("Error: Command %s expects no arguments", cmd)
1283
    return False
1284

    
1285
  min_count = None
1286
  max_count = None
1287
  check_max = None
1288

    
1289
  last_idx = len(args_def) - 1
1290

    
1291
  for idx, arg in enumerate(args_def):
1292
    if min_count is None:
1293
      min_count = arg.min
1294
    elif arg.min is not None:
1295
      min_count += arg.min
1296

    
1297
    if max_count is None:
1298
      max_count = arg.max
1299
    elif arg.max is not None:
1300
      max_count += arg.max
1301

    
1302
    if idx == last_idx:
1303
      check_max = (arg.max is not None)
1304

    
1305
    elif arg.max is None:
1306
      raise errors.ProgrammerError("Only the last argument can have max=None")
1307

    
1308
  if check_max:
1309
    # Command with exact number of arguments
1310
    if (min_count is not None and max_count is not None and
1311
        min_count == max_count and len(args) != min_count):
1312
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1313
      return False
1314

    
1315
    # Command with limited number of arguments
1316
    if max_count is not None and len(args) > max_count:
1317
      ToStderr("Error: Command %s expects only %d argument(s)",
1318
               cmd, max_count)
1319
      return False
1320

    
1321
  # Command with some required arguments
1322
  if min_count is not None and len(args) < min_count:
1323
    ToStderr("Error: Command %s expects at least %d argument(s)",
1324
             cmd, min_count)
1325
    return False
1326

    
1327
  return True
1328

    
1329

    
1330
def SplitNodeOption(value):
1331
  """Splits the value of a --node option.
1332

1333
  """
1334
  if value and ':' in value:
1335
    return value.split(':', 1)
1336
  else:
1337
    return (value, None)
1338

    
1339

    
1340
def CalculateOSNames(os_name, os_variants):
1341
  """Calculates all the names an OS can be called, according to its variants.
1342

1343
  @type os_name: string
1344
  @param os_name: base name of the os
1345
  @type os_variants: list or None
1346
  @param os_variants: list of supported variants
1347
  @rtype: list
1348
  @return: list of valid names
1349

1350
  """
1351
  if os_variants:
1352
    return ['%s+%s' % (os_name, v) for v in os_variants]
1353
  else:
1354
    return [os_name]
1355

    
1356

    
1357
def ParseFields(selected, default):
1358
  """Parses the values of "--field"-like options.
1359

1360
  @type selected: string or None
1361
  @param selected: User-selected options
1362
  @type default: list
1363
  @param default: Default fields
1364

1365
  """
1366
  if selected is None:
1367
    return default
1368

    
1369
  if selected.startswith("+"):
1370
    return default + selected[1:].split(",")
1371

    
1372
  return selected.split(",")
1373

    
1374

    
1375
UsesRPC = rpc.RunWithRPC
1376

    
1377

    
1378
def AskUser(text, choices=None):
1379
  """Ask the user a question.
1380

1381
  @param text: the question to ask
1382

1383
  @param choices: list with elements tuples (input_char, return_value,
1384
      description); if not given, it will default to: [('y', True,
1385
      'Perform the operation'), ('n', False, 'Do no do the operation')];
1386
      note that the '?' char is reserved for help
1387

1388
  @return: one of the return values from the choices list; if input is
1389
      not possible (i.e. not running with a tty, we return the last
1390
      entry from the list
1391

1392
  """
1393
  if choices is None:
1394
    choices = [('y', True, 'Perform the operation'),
1395
               ('n', False, 'Do not perform the operation')]
1396
  if not choices or not isinstance(choices, list):
1397
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1398
  for entry in choices:
1399
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1400
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1401

    
1402
  answer = choices[-1][1]
1403
  new_text = []
1404
  for line in text.splitlines():
1405
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1406
  text = "\n".join(new_text)
1407
  try:
1408
    f = file("/dev/tty", "a+")
1409
  except IOError:
1410
    return answer
1411
  try:
1412
    chars = [entry[0] for entry in choices]
1413
    chars[-1] = "[%s]" % chars[-1]
1414
    chars.append('?')
1415
    maps = dict([(entry[0], entry[1]) for entry in choices])
1416
    while True:
1417
      f.write(text)
1418
      f.write('\n')
1419
      f.write("/".join(chars))
1420
      f.write(": ")
1421
      line = f.readline(2).strip().lower()
1422
      if line in maps:
1423
        answer = maps[line]
1424
        break
1425
      elif line == '?':
1426
        for entry in choices:
1427
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1428
        f.write("\n")
1429
        continue
1430
  finally:
1431
    f.close()
1432
  return answer
1433

    
1434

    
1435
class JobSubmittedException(Exception):
1436
  """Job was submitted, client should exit.
1437

1438
  This exception has one argument, the ID of the job that was
1439
  submitted. The handler should print this ID.
1440

1441
  This is not an error, just a structured way to exit from clients.
1442

1443
  """
1444

    
1445

    
1446
def SendJob(ops, cl=None):
1447
  """Function to submit an opcode without waiting for the results.
1448

1449
  @type ops: list
1450
  @param ops: list of opcodes
1451
  @type cl: luxi.Client
1452
  @param cl: the luxi client to use for communicating with the master;
1453
             if None, a new client will be created
1454

1455
  """
1456
  if cl is None:
1457
    cl = GetClient()
1458

    
1459
  job_id = cl.SubmitJob(ops)
1460

    
1461
  return job_id
1462

    
1463

    
1464
def GenericPollJob(job_id, cbs, report_cbs):
1465
  """Generic job-polling function.
1466

1467
  @type job_id: number
1468
  @param job_id: Job ID
1469
  @type cbs: Instance of L{JobPollCbBase}
1470
  @param cbs: Data callbacks
1471
  @type report_cbs: Instance of L{JobPollReportCbBase}
1472
  @param report_cbs: Reporting callbacks
1473

1474
  """
1475
  prev_job_info = None
1476
  prev_logmsg_serial = None
1477

    
1478
  status = None
1479

    
1480
  while True:
1481
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1482
                                      prev_logmsg_serial)
1483
    if not result:
1484
      # job not found, go away!
1485
      raise errors.JobLost("Job with id %s lost" % job_id)
1486

    
1487
    if result == constants.JOB_NOTCHANGED:
1488
      report_cbs.ReportNotChanged(job_id, status)
1489

    
1490
      # Wait again
1491
      continue
1492

    
1493
    # Split result, a tuple of (field values, log entries)
1494
    (job_info, log_entries) = result
1495
    (status, ) = job_info
1496

    
1497
    if log_entries:
1498
      for log_entry in log_entries:
1499
        (serial, timestamp, log_type, message) = log_entry
1500
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1501
                                    log_type, message)
1502
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1503

    
1504
    # TODO: Handle canceled and archived jobs
1505
    elif status in (constants.JOB_STATUS_SUCCESS,
1506
                    constants.JOB_STATUS_ERROR,
1507
                    constants.JOB_STATUS_CANCELING,
1508
                    constants.JOB_STATUS_CANCELED):
1509
      break
1510

    
1511
    prev_job_info = job_info
1512

    
1513
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1514
  if not jobs:
1515
    raise errors.JobLost("Job with id %s lost" % job_id)
1516

    
1517
  status, opstatus, result = jobs[0]
1518

    
1519
  if status == constants.JOB_STATUS_SUCCESS:
1520
    return result
1521

    
1522
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1523
    raise errors.OpExecError("Job was canceled")
1524

    
1525
  has_ok = False
1526
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1527
    if status == constants.OP_STATUS_SUCCESS:
1528
      has_ok = True
1529
    elif status == constants.OP_STATUS_ERROR:
1530
      errors.MaybeRaise(msg)
1531

    
1532
      if has_ok:
1533
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1534
                                 (idx, msg))
1535

    
1536
      raise errors.OpExecError(str(msg))
1537

    
1538
  # default failure mode
1539
  raise errors.OpExecError(result)
1540

    
1541

    
1542
class JobPollCbBase:
1543
  """Base class for L{GenericPollJob} callbacks.
1544

1545
  """
1546
  def __init__(self):
1547
    """Initializes this class.
1548

1549
    """
1550

    
1551
  def WaitForJobChangeOnce(self, job_id, fields,
1552
                           prev_job_info, prev_log_serial):
1553
    """Waits for changes on a job.
1554

1555
    """
1556
    raise NotImplementedError()
1557

    
1558
  def QueryJobs(self, job_ids, fields):
1559
    """Returns the selected fields for the selected job IDs.
1560

1561
    @type job_ids: list of numbers
1562
    @param job_ids: Job IDs
1563
    @type fields: list of strings
1564
    @param fields: Fields
1565

1566
    """
1567
    raise NotImplementedError()
1568

    
1569

    
1570
class JobPollReportCbBase:
1571
  """Base class for L{GenericPollJob} reporting callbacks.
1572

1573
  """
1574
  def __init__(self):
1575
    """Initializes this class.
1576

1577
    """
1578

    
1579
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1580
    """Handles a log message.
1581

1582
    """
1583
    raise NotImplementedError()
1584

    
1585
  def ReportNotChanged(self, job_id, status):
1586
    """Called for if a job hasn't changed in a while.
1587

1588
    @type job_id: number
1589
    @param job_id: Job ID
1590
    @type status: string or None
1591
    @param status: Job status if available
1592

1593
    """
1594
    raise NotImplementedError()
1595

    
1596

    
1597
class _LuxiJobPollCb(JobPollCbBase):
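  """L{JobPollCbBase} implementation that uses a luxi client.

  """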
1598
  def __init__(self, cl):
1599
    """Initializes this class.
1600

1601
    """
1602
    JobPollCbBase.__init__(self)
1603
    self.cl = cl
1604

    
1605
  def WaitForJobChangeOnce(self, job_id, fields,
1606
                           prev_job_info, prev_log_serial):
1607
    """Waits for changes on a job.
1608

1609
    """
1610
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1611
                                        prev_job_info, prev_log_serial)
1612

    
1613
  def QueryJobs(self, job_ids, fields):
1614
    """Returns the selected fields for the selected job IDs.
1615

1616
    """
1617
    return self.cl.QueryJobs(job_ids, fields)
1618

    
1619

    
1620
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
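  """Job poll reporting callback forwarding log messages to a feedback function.

  """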
1621
  def __init__(self, feedback_fn):
1622
    """Initializes this class.
1623

1624
    """
1625
    JobPollReportCbBase.__init__(self)
1626

    
1627
    self.feedback_fn = feedback_fn
1628

    
1629
    assert callable(feedback_fn)
1630

    
1631
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1632
    """Handles a log message.
1633

1634
    """
1635
    self.feedback_fn((timestamp, log_type, log_msg))
1636

    
1637
  def ReportNotChanged(self, job_id, status):
1638
    """Called if a job hasn't changed in a while.
1639

1640
    """
1641
    # Ignore
1642

    
1643

    
1644
class StdioJobPollReportCb(JobPollReportCbBase):
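  """Job poll reporting callback printing messages to standard output/error.

  """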
1645
  def __init__(self):
1646
    """Initializes this class.
1647

1648
    """
1649
    JobPollReportCbBase.__init__(self)
1650

    
1651
    self.notified_queued = False
1652
    self.notified_waitlock = False
1653

    
1654
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1655
    """Handles a log message.
1656

1657
    """
1658
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1659
             FormatLogMessage(log_type, log_msg))
1660

    
1661
  def ReportNotChanged(self, job_id, status):
1662
    """Called if a job hasn't changed in a while.
1663

1664
    """
1665
    if status is None:
1666
      return
1667

    
1668
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1669
      ToStderr("Job %s is waiting in queue", job_id)
1670
      self.notified_queued = True
1671

    
1672
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1673
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1674
      self.notified_waitlock = True
1675

    
1676

    
1677
def FormatLogMessage(log_type, log_msg):
1678
  """Formats a job message according to its type.
1679

1680
  """
1681
  if log_type != constants.ELOG_MESSAGE:
1682
    log_msg = str(log_msg)
1683

    
1684
  return utils.SafeEncode(log_msg)
1685

    
1686

    
1687
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1688
  """Function to poll for the result of a job.
1689

1690
  @type job_id: job identifier
1691
  @param job_id: the job to poll for results
1692
  @type cl: luxi.Client
1693
  @param cl: the luxi client to use for communicating with the master;
1694
             if None, a new client will be created
1695

1696
  """
1697
  if cl is None:
1698
    cl = GetClient()
1699

    
1700
  if reporter is None:
1701
    if feedback_fn:
1702
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1703
    else:
1704
      reporter = StdioJobPollReportCb()
1705
  elif feedback_fn:
1706
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1707

    
1708
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1709

    
1710

    
1711
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1712
  """Legacy function to submit an opcode.
1713

1714
  This is just a simple wrapper over the construction of the processor
1715
  instance. It should be extended to better handle feedback and
1716
  interaction functions.
1717

1718
  """
1719
  if cl is None:
1720
    cl = GetClient()
1721

    
1722
  SetGenericOpcodeOpts([op], opts)
1723

    
1724
  job_id = SendJob([op], cl=cl)
1725

    
1726
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1727
                       reporter=reporter)
1728

    
1729
  return op_results[0]
1730

    
1731

    
1732
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1733
  """Wrapper around SubmitOpCode or SendJob.
1734

1735
  This function will decide, based on the 'opts' parameter, whether to
1736
  submit and wait for the result of the opcode (and return it), or
1737
  whether to just send the job and print its identifier. It is used in
1738
  order to simplify the implementation of the '--submit' option.
1739

1740
  It will also process the opcodes if we're sending them via SendJob
1741
  (otherwise SubmitOpCode does it).
1742

1743
  """
1744
  if opts and opts.submit_only:
1745
    job = [op]
1746
    SetGenericOpcodeOpts(job, opts)
1747
    job_id = SendJob(job, cl=cl)
1748
    raise JobSubmittedException(job_id)
1749
  else:
1750
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1751

    
1752

    
1753
def SetGenericOpcodeOpts(opcode_list, options):
1754
  """Processor for generic options.
1755

1756
  This function updates the given opcodes based on generic command
1757
  line options (like debug, dry-run, etc.).
1758

1759
  @param opcode_list: list of opcodes
1760
  @param options: command line options or None
1761
  @return: None (in-place modification)
1762

1763
  """
1764
  if not options:
1765
    return
1766
  for op in opcode_list:
1767
    op.debug_level = options.debug
1768
    if hasattr(options, "dry_run"):
1769
      op.dry_run = options.dry_run
1770
    if getattr(options, "priority", None) is not None:
1771
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1772

    
1773

    
1774
def GetClient():
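  """Returns a luxi client for communicating with the master daemon.

  If no master daemon is reachable, L{errors.OpPrereqError} is raised with
  a hint about the cluster state or the node to connect to instead.

  """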
1775
  # TODO: Cache object?
1776
  try:
1777
    client = luxi.Client()
1778
  except luxi.NoMasterError:
1779
    ss = ssconf.SimpleStore()
1780

    
1781
    # Try to read ssconf file
1782
    try:
1783
      ss.GetMasterNode()
1784
    except errors.ConfigurationError:
1785
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1786
                                 " not part of a cluster")
1787

    
1788
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1789
    if master != myself:
1790
      raise errors.OpPrereqError("This is not the master node, please connect"
1791
                                 " to node '%s' and rerun the command" %
1792
                                 master)
1793
    raise
1794
  return client
1795

    
1796

    
1797
def FormatError(err):
1798
  """Return a formatted error message for a given error.
1799

1800
  This function takes an exception instance and returns a tuple
1801
  consisting of two values: first, the recommended exit code, and
1802
  second, a string describing the error message (not
1803
  newline-terminated).
1804

1805
  """
1806
  retcode = 1
1807
  obuf = StringIO()
1808
  msg = str(err)
1809
  if isinstance(err, errors.ConfigurationError):
1810
    txt = "Corrupt configuration file: %s" % msg
1811
    logging.error(txt)
1812
    obuf.write(txt + "\n")
1813
    obuf.write("Aborting.")
1814
    retcode = 2
1815
  elif isinstance(err, errors.HooksAbort):
1816
    obuf.write("Failure: hooks execution failed:\n")
1817
    for node, script, out in err.args[0]:
1818
      if out:
1819
        obuf.write("  node: %s, script: %s, output: %s\n" %
1820
                   (node, script, out))
1821
      else:
1822
        obuf.write("  node: %s, script: %s (no output)\n" %
1823
                   (node, script))
1824
  elif isinstance(err, errors.HooksFailure):
1825
    obuf.write("Failure: hooks general failure: %s" % msg)
1826
  elif isinstance(err, errors.ResolverError):
1827
    this_host = netutils.Hostname.GetSysName()
1828
    if err.args[0] == this_host:
1829
      msg = "Failure: can't resolve my own hostname ('%s')"
1830
    else:
1831
      msg = "Failure: can't resolve hostname '%s'"
1832
    obuf.write(msg % err.args[0])
1833
  elif isinstance(err, errors.OpPrereqError):
1834
    if len(err.args) == 2:
1835
      obuf.write("Failure: prerequisites not met for this"
1836
               " operation:\nerror type: %s, error details:\n%s" %
1837
                 (err.args[1], err.args[0]))
1838
    else:
1839
      obuf.write("Failure: prerequisites not met for this"
1840
                 " operation:\n%s" % msg)
1841
  elif isinstance(err, errors.OpExecError):
1842
    obuf.write("Failure: command execution error:\n%s" % msg)
1843
  elif isinstance(err, errors.TagError):
1844
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1845
  elif isinstance(err, errors.JobQueueDrainError):
1846
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1847
               " accept new requests\n")
1848
  elif isinstance(err, errors.JobQueueFull):
1849
    obuf.write("Failure: the job queue is full and doesn't accept new"
1850
               " job submissions until old jobs are archived\n")
1851
  elif isinstance(err, errors.TypeEnforcementError):
1852
    obuf.write("Parameter Error: %s" % msg)
1853
  elif isinstance(err, errors.ParameterError):
1854
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1855
  elif isinstance(err, luxi.NoMasterError):
1856
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1857
               " and listening for connections?")
1858
  elif isinstance(err, luxi.TimeoutError):
1859
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1860
               " been submitted and will continue to run even if the call"
1861
               " timed out. Useful commands in this situation are \"gnt-job"
1862
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1863
    obuf.write(msg)
1864
  elif isinstance(err, luxi.PermissionError):
1865
    obuf.write("It seems you don't have permissions to connect to the"
1866
               " master daemon.\nPlease retry as a different user.")
1867
  elif isinstance(err, luxi.ProtocolError):
1868
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1869
               "%s" % msg)
1870
  elif isinstance(err, errors.JobLost):
1871
    obuf.write("Error checking job status: %s" % msg)
1872
  elif isinstance(err, errors.GenericError):
1873
    obuf.write("Unhandled Ganeti error: %s" % msg)
1874
  elif isinstance(err, JobSubmittedException):
1875
    obuf.write("JobID: %s\n" % err.args[0])
1876
    retcode = 0
1877
  else:
1878
    obuf.write("Unhandled exception: %s" % msg)
1879
  return retcode, obuf.getvalue().rstrip('\n')
1880

    
1881

    
1882
def GenericMain(commands, override=None, aliases=None):
1883
  """Generic main function for all the gnt-* commands.
1884

1885
  Arguments:
1886
    - commands: a dictionary with a special structure, see the design doc
1887
                for command line handling.
1888
    - override: if not None, we expect a dictionary with keys that will
1889
                override command line options; this can be used to pass
1890
                options from the scripts to generic functions
1891
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1892

1893
  """
1894
  # save the program name and the entire command line for later logging
1895
  if sys.argv:
1896
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1897
    if len(sys.argv) >= 2:
1898
      binary += " " + sys.argv[1]
1899
      old_cmdline = " ".join(sys.argv[2:])
1900
    else:
1901
      old_cmdline = ""
1902
  else:
1903
    binary = "<unknown program>"
1904
    old_cmdline = ""
1905

    
1906
  if aliases is None:
1907
    aliases = {}
1908

    
1909
  try:
1910
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1911
  except errors.ParameterError, err:
1912
    result, err_msg = FormatError(err)
1913
    ToStderr(err_msg)
1914
    return 1
1915

    
1916
  if func is None: # parse error
1917
    return 1
1918

    
1919
  if override is not None:
1920
    for key, val in override.iteritems():
1921
      setattr(options, key, val)
1922

    
1923
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
1924
                     stderr_logging=True)
1925

    
1926
  if old_cmdline:
1927
    logging.info("run with arguments '%s'", old_cmdline)
1928
  else:
1929
    logging.info("run with no arguments")
1930

    
1931
  try:
1932
    result = func(options, args)
1933
  except (errors.GenericError, luxi.ProtocolError,
1934
          JobSubmittedException), err:
1935
    result, err_msg = FormatError(err)
1936
    logging.exception("Error during command processing")
1937
    ToStderr(err_msg)
1938
  except KeyboardInterrupt:
1939
    result = constants.EXIT_FAILURE
1940
    ToStderr("Aborted. Note that if the operation created any jobs, they"
1941
             " might have been submitted and"
1942
             " will continue to run in the background.")
1943

    
1944
  return result
1945

    
1946

    
1947
def ParseNicOption(optvalue):
1948
  """Parses the value of the --net option(s).
1949

1950
  """
1951
  try:
1952
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1953
  except (TypeError, ValueError), err:
1954
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1955

    
1956
  nics = [{}] * nic_max
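  # start with empty parameter dicts; each specified NIC index is filled below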
1957
  for nidx, ndict in optvalue:
1958
    nidx = int(nidx)
1959

    
1960
    if not isinstance(ndict, dict):
1961
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1962
                                 " got %s" % (nidx, ndict))
1963

    
1964
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1965

    
1966
    nics[nidx] = ndict
1967

    
1968
  return nics
1969

    
1970

    
1971
def GenericInstanceCreate(mode, opts, args):
1972
  """Add an instance to the cluster via either creation or import.
1973

1974
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1975
  @param opts: the command line options selected by the user
1976
  @type args: list
1977
  @param args: should contain only one element, the new instance name
1978
  @rtype: int
1979
  @return: the desired exit code
1980

1981
  """
1982
  instance = args[0]
1983

    
1984
  (pnode, snode) = SplitNodeOption(opts.node)
1985

    
1986
  hypervisor = None
1987
  hvparams = {}
1988
  if opts.hypervisor:
1989
    hypervisor, hvparams = opts.hypervisor
1990

    
1991
  if opts.nics:
1992
    nics = ParseNicOption(opts.nics)
1993
  elif opts.no_nics:
1994
    # no nics
1995
    nics = []
1996
  elif mode == constants.INSTANCE_CREATE:
1997
    # default of one nic, all auto
1998
    nics = [{}]
1999
  else:
2000
    # mode == import
2001
    nics = []
2002

    
2003
  if opts.disk_template == constants.DT_DISKLESS:
2004
    if opts.disks or opts.sd_size is not None:
2005
      raise errors.OpPrereqError("Diskless instance but disk"
2006
                                 " information passed")
2007
    disks = []
2008
  else:
2009
    if (not opts.disks and not opts.sd_size
2010
        and mode == constants.INSTANCE_CREATE):
2011
      raise errors.OpPrereqError("No disk information specified")
2012
    if opts.disks and opts.sd_size is not None:
2013
      raise errors.OpPrereqError("Please use either the '--disk' or"
2014
                                 " '-s' option")
2015
    if opts.sd_size is not None:
2016
      opts.disks = [(0, {"size": opts.sd_size})]
2017

    
2018
    if opts.disks:
2019
      try:
2020
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2021
      except ValueError, err:
2022
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2023
      disks = [{}] * disk_max
2024
    else:
2025
      disks = []
2026
    for didx, ddict in opts.disks:
2027
      didx = int(didx)
2028
      if not isinstance(ddict, dict):
2029
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2030
        raise errors.OpPrereqError(msg)
2031
      elif "size" in ddict:
2032
        if "adopt" in ddict:
2033
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2034
                                     " (disk %d)" % didx)
2035
        try:
2036
          ddict["size"] = utils.ParseUnit(ddict["size"])
2037
        except ValueError, err:
2038
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2039
                                     (didx, err))
2040
      elif "adopt" in ddict:
2041
        if mode == constants.INSTANCE_IMPORT:
2042
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2043
                                     " import")
2044
        ddict["size"] = 0
2045
      else:
2046
        raise errors.OpPrereqError("Missing size or adoption source for"
2047
                                   " disk %d" % didx)
2048
      disks[didx] = ddict
2049

    
2050
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2051
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2052

    
2053
  if mode == constants.INSTANCE_CREATE:
2054
    start = opts.start
2055
    os_type = opts.os
2056
    force_variant = opts.force_variant
2057
    src_node = None
2058
    src_path = None
2059
    no_install = opts.no_install
2060
    identify_defaults = False
2061
  elif mode == constants.INSTANCE_IMPORT:
2062
    start = False
2063
    os_type = None
2064
    force_variant = False
2065
    src_node = opts.src_node
2066
    src_path = opts.src_dir
2067
    no_install = None
2068
    identify_defaults = opts.identify_defaults
2069
  else:
2070
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2071

    
2072
  op = opcodes.OpInstanceCreate(instance_name=instance,
2073
                                disks=disks,
2074
                                disk_template=opts.disk_template,
2075
                                nics=nics,
2076
                                pnode=pnode, snode=snode,
2077
                                ip_check=opts.ip_check,
2078
                                name_check=opts.name_check,
2079
                                wait_for_sync=opts.wait_for_sync,
2080
                                file_storage_dir=opts.file_storage_dir,
2081
                                file_driver=opts.file_driver,
2082
                                iallocator=opts.iallocator,
2083
                                hypervisor=hypervisor,
2084
                                hvparams=hvparams,
2085
                                beparams=opts.beparams,
2086
                                osparams=opts.osparams,
2087
                                mode=mode,
2088
                                start=start,
2089
                                os_type=os_type,
2090
                                force_variant=force_variant,
2091
                                src_node=src_node,
2092
                                src_path=src_path,
2093
                                no_install=no_install,
2094
                                identify_defaults=identify_defaults)
2095

    
2096
  SubmitOrSend(op, opts)
2097
  return 0
2098

    
2099

    
2100
class _RunWhileClusterStoppedHelper:
2101
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2102

2103
  """
2104
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2105
    """Initializes this class.
2106

2107
    @type feedback_fn: callable
2108
    @param feedback_fn: Feedback function
2109
    @type cluster_name: string
2110
    @param cluster_name: Cluster name
2111
    @type master_node: string
2112
    @param master_node: Master node name
2113
    @type online_nodes: list
2114
    @param online_nodes: List of names of online nodes
2115

2116
    """
2117
    self.feedback_fn = feedback_fn
2118
    self.cluster_name = cluster_name
2119
    self.master_node = master_node
2120
    self.online_nodes = online_nodes
2121

    
2122
    self.ssh = ssh.SshRunner(self.cluster_name)
2123

    
2124
    self.nonmaster_nodes = [name for name in online_nodes
2125
                            if name != master_node]
2126

    
2127
    assert self.master_node not in self.nonmaster_nodes
2128

    
2129
  def _RunCmd(self, node_name, cmd):
2130
    """Runs a command on the local or a remote machine.
2131

2132
    @type node_name: string
2133
    @param node_name: Machine name
2134
    @type cmd: list
2135
    @param cmd: Command
2136

2137
    """
2138
    if node_name is None or node_name == self.master_node:
2139
      # No need to use SSH
2140
      result = utils.RunCmd(cmd)
2141
    else:
2142
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2143

    
2144
    if result.failed:
2145
      errmsg = ["Failed to run command %s" % result.cmd]
2146
      if node_name:
2147
        errmsg.append("on node %s" % node_name)
2148
      errmsg.append(": exitcode %s and error %s" %
2149
                    (result.exit_code, result.output))
2150
      raise errors.OpExecError(" ".join(errmsg))
2151

    
2152
  def Call(self, fn, *args):
2153
    """Call function while all daemons are stopped.
2154

2155
    @type fn: callable
2156
    @param fn: Function to be called
2157

2158
    """
2159
    # Pause watcher by acquiring an exclusive lock on watcher state file
2160
    self.feedback_fn("Blocking watcher")
2161
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2162
    try:
2163
      # TODO: Currently, this just blocks. There's no timeout.
2164
      # TODO: Should it be a shared lock?
2165
      watcher_block.Exclusive(blocking=True)
2166

    
2167
      # Stop master daemons, so that no new jobs can come in and all running
2168
      # ones are finished
2169
      self.feedback_fn("Stopping master daemons")
2170
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2171
      try:
2172
        # Stop daemons on all nodes
2173
        for node_name in self.online_nodes:
2174
          self.feedback_fn("Stopping daemons on %s" % node_name)
2175
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2176

    
2177
        # All daemons are shut down now
2178
        try:
2179
          return fn(self, *args)
2180
        except Exception, err:
2181
          _, errmsg = FormatError(err)
2182
          logging.exception("Caught exception")
2183
          self.feedback_fn(errmsg)
2184
          raise
2185
      finally:
2186
        # Start cluster again, master node last
2187
        for node_name in self.nonmaster_nodes + [self.master_node]:
2188
          self.feedback_fn("Starting daemons on %s" % node_name)
2189
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2190
    finally:
2191
      # Resume watcher
2192
      watcher_block.Close()
2193

    
2194

    
2195
def RunWhileClusterStopped(feedback_fn, fn, *args):
2196
  """Calls a function while all cluster daemons are stopped.
2197

2198
  @type feedback_fn: callable
2199
  @param feedback_fn: Feedback function
2200
  @type fn: callable
2201
  @param fn: Function to be called when daemons are stopped
2202

2203
  """
2204
  feedback_fn("Gathering cluster information")
2205

    
2206
  # This ensures we're running on the master daemon
2207
  cl = GetClient()
2208

    
2209
  (cluster_name, master_node) = \
2210
    cl.QueryConfigValues(["cluster_name", "master_node"])
2211

    
2212
  online_nodes = GetOnlineNodes([], cl=cl)
2213

    
2214
  # Don't keep a reference to the client. The master daemon will go away.
2215
  del cl
2216

    
2217
  assert master_node in online_nodes
2218

    
2219
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2220
                                       online_nodes).Call(fn, *args)
2221

    
2222

    
2223
def GenerateTable(headers, fields, separator, data,
2224
                  numfields=None, unitfields=None,
2225
                  units=None):
2226
  """Prints a table with headers and different fields.
2227

2228
  @type headers: dict
2229
  @param headers: dictionary mapping field names to headers for
2230
      the table
2231
  @type fields: list
2232
  @param fields: the field names corresponding to each row in
2233
      the data field
2234
  @param separator: the separator to be used; if this is None,
2235
      the default 'smart' algorithm is used which computes optimal
2236
      field width, otherwise just the separator is used between
2237
      each field
2238
  @type data: list
2239
  @param data: a list of lists, each sublist being one row to be output
2240
  @type numfields: list
2241
  @param numfields: a list with the fields that hold numeric
2242
      values and thus should be right-aligned
2243
  @type unitfields: list
2244
  @param unitfields: a list with the fields that hold numeric
2245
      values that should be formatted with the units field
2246
  @type units: string or None
2247
  @param units: the units we should use for formatting, or None for
2248
      automatic choice (human-readable for non-separator usage, otherwise
2249
      megabytes); this is a one-letter string
2250

2251
  """
2252
  if units is None:
2253
    if separator:
2254
      units = "m"
2255
    else:
2256
      units = "h"
2257

    
2258
  if numfields is None:
2259
    numfields = []
2260
  if unitfields is None:
2261
    unitfields = []
2262

    
2263
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2264
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2265

    
2266
  format_fields = []
2267
  for field in fields:
2268
    if headers and field not in headers:
2269
      # TODO: handle better unknown fields (either revert to old
2270
      # style of raising exception, or deal more intelligently with
2271
      # variable fields)
2272
      headers[field] = field
2273
    if separator is not None:
2274
      format_fields.append("%s")
2275
    elif numfields.Matches(field):
2276
      format_fields.append("%*s")
2277
    else:
2278
      format_fields.append("%-*s")
2279

    
2280
  if separator is None:
2281
    mlens = [0 for name in fields]
2282
    format_str = ' '.join(format_fields)
2283
  else:
2284
    format_str = separator.replace("%", "%%").join(format_fields)
2285

    
2286
  for row in data:
2287
    if row is None:
2288
      continue
2289
    for idx, val in enumerate(row):
2290
      if unitfields.Matches(fields[idx]):
2291
        try:
2292
          val = int(val)
2293
        except (TypeError, ValueError):
2294
          pass
2295
        else:
2296
          val = row[idx] = utils.FormatUnit(val, units)
2297
      val = row[idx] = str(val)
2298
      if separator is None:
2299
        mlens[idx] = max(mlens[idx], len(val))
2300

    
2301
  result = []
2302
  if headers:
2303
    args = []
2304
    for idx, name in enumerate(fields):
2305
      hdr = headers[name]
2306
      if separator is None:
2307
        mlens[idx] = max(mlens[idx], len(hdr))
2308
        args.append(mlens[idx])
2309
      args.append(hdr)
2310
    result.append(format_str % tuple(args))
2311

    
2312
  if separator is None:
2313
    assert len(mlens) == len(fields)
2314

    
2315
    if fields and not numfields.Matches(fields[-1]):
2316
      mlens[-1] = 0
2317

    
2318
  for line in data:
2319
    args = []
2320
    if line is None:
2321
      line = ['-' for _ in fields]
2322
    for idx in range(len(fields)):
2323
      if separator is None:
2324
        args.append(mlens[idx])
2325
      args.append(line[idx])
2326
    result.append(format_str % tuple(args))
2327

    
2328
  return result
2329

    
2330

    
2331
def _FormatBool(value):
2332
  """Formats a boolean value as a string.
2333

2334
  """
2335
  if value:
2336
    return "Y"
2337
  return "N"
2338

    
2339

    
2340
#: Default formatting for query results; (callback, align right)
2341
_DEFAULT_FORMAT_QUERY = {
2342
  constants.QFT_TEXT: (str, False),
2343
  constants.QFT_BOOL: (_FormatBool, False),
2344
  constants.QFT_NUMBER: (str, True),
2345
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2346
  constants.QFT_OTHER: (str, False),
2347
  constants.QFT_UNKNOWN: (str, False),
2348
  }
2349

    
2350

    
2351
def _GetColumnFormatter(fdef, override, unit):
2352
  """Returns formatting function for a field.
2353

2354
  @type fdef: L{objects.QueryFieldDefinition}
2355
  @type override: dict
2356
  @param override: Dictionary for overriding field formatting functions,
2357
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2358
  @type unit: string
2359
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2360
  @rtype: tuple; (callable, bool)
2361
  @return: Returns the function to format a value (takes one parameter) and a
2362
    boolean for aligning the value on the right-hand side
2363

2364
  """
2365
  fmt = override.get(fdef.name, None)
2366
  if fmt is not None:
2367
    return fmt
2368

    
2369
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2370

    
2371
  if fdef.kind == constants.QFT_UNIT:
2372
    # Can't keep this information in the static dictionary
2373
    return (lambda value: utils.FormatUnit(value, unit), True)
2374

    
2375
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2376
  if fmt is not None:
2377
    return fmt
2378

    
2379
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2380

    
2381

    
2382
class _QueryColumnFormatter:
2383
  """Callable class for formatting fields of a query.
2384

2385
  """
2386
  def __init__(self, fn, status_fn, verbose):
2387
    """Initializes this class.
2388

2389
    @type fn: callable
2390
    @param fn: Formatting function
2391
    @type status_fn: callable
2392
    @param status_fn: Function to report fields' status
2393
    @type verbose: boolean
2394
    @param verbose: whether to use verbose field descriptions or not
2395

2396
    """
2397
    self._fn = fn
2398
    self._status_fn = status_fn
2399
    self._verbose = verbose
2400

    
2401
  def __call__(self, data):
2402
    """Returns a field's string representation.
2403

2404
    """
2405
    (status, value) = data
2406

    
2407
    # Report status
2408
    self._status_fn(status)
2409

    
2410
    if status == constants.RS_NORMAL:
2411
      return self._fn(value)
2412

    
2413
    assert value is None, \
2414
           "Found value %r for abnormal status %s" % (value, status)
2415

    
2416
    return FormatResultError(status, verbose=self._verbose)
2417

    
2418

    
2419
def FormatResultError(status, verbose=True):
2420
  """Formats result status other than L{constants.RS_NORMAL}.
2421

2422
  @param status: The result status
2423
  @return: Text of result status
2424

2425
  """
2426
  assert status != constants.RS_NORMAL, \
2427
         "FormatResultError called with status equal to constants.RS_NORMAL"
2428
  try:
2429
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2430
  except KeyError:
2431
    raise NotImplementedError("Unknown status %s" % status)
2432
  else:
2433
    if verbose:
2434
      return verbose_text
2435
    return normal_text
2436

    
2437

    
2438
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2439
                      header=False, verbose=False):
2440
  """Formats data in L{objects.QueryResponse}.
2441

2442
  @type result: L{objects.QueryResponse}
2443
  @param result: result of query operation
2444
  @type unit: string
2445
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2446
    see L{utils.text.FormatUnit}
2447
  @type format_override: dict
2448
  @param format_override: Dictionary for overriding field formatting functions,
2449
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2450
  @type separator: string or None
2451
  @param separator: String used to separate fields
2452
  @type header: bool
2453
  @param header: Whether to output header row
2454
  @type verbose: boolean
2455
  @param verbose: whether to use verbose field descriptions or not
2456

2457
  """
2458
  if unit is None:
2459
    if separator:
2460
      unit = "m"
2461
    else:
2462
      unit = "h"
2463

    
2464
  if format_override is None:
2465
    format_override = {}
2466

    
2467
  stats = dict.fromkeys(constants.RS_ALL, 0)
2468

    
2469
  def _RecordStatus(status):
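    # count each field's result status; used below to compute the overall
    # query status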
2470
    if status in stats:
2471
      stats[status] += 1
2472

    
2473
  columns = []
2474
  for fdef in result.fields:
2475
    assert fdef.title and fdef.name
2476
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2477
    columns.append(TableColumn(fdef.title,
2478
                               _QueryColumnFormatter(fn, _RecordStatus,
2479
                                                     verbose),
2480
                               align_right))
2481

    
2482
  table = FormatTable(result.data, columns, header, separator)
2483

    
2484
  # Collect statistics
2485
  assert len(stats) == len(constants.RS_ALL)
2486
  assert compat.all(count >= 0 for count in stats.values())
2487

    
2488
  # Determine overall status. If there was no data, unknown fields must be
2489
  # detected via the field definitions.
2490
  if (stats[constants.RS_UNKNOWN] or
2491
      (not result.data and _GetUnknownFields(result.fields))):
2492
    status = QR_UNKNOWN
2493
  elif compat.any(count > 0 for key, count in stats.items()
2494
                  if key != constants.RS_NORMAL):
2495
    status = QR_INCOMPLETE
2496
  else:
2497
    status = QR_NORMAL
2498

    
2499
  return (status, table)
2500

    
2501

    
2502
def _GetUnknownFields(fdefs):
2503
  """Returns list of unknown fields included in C{fdefs}.
2504

2505
  @type fdefs: list of L{objects.QueryFieldDefinition}
2506

2507
  """
2508
  return [fdef for fdef in fdefs
2509
          if fdef.kind == constants.QFT_UNKNOWN]
2510

    
2511

    
2512
def _WarnUnknownFields(fdefs):
2513
  """Prints a warning to stderr if a query included unknown fields.
2514

2515
  @type fdefs: list of L{objects.QueryFieldDefinition}
2516

2517
  """
2518
  unknown = _GetUnknownFields(fdefs)
2519
  if unknown:
2520
    ToStderr("Warning: Queried for unknown fields %s",
2521
             utils.CommaJoin(fdef.name for fdef in unknown))
2522
    return True
2523

    
2524
  return False
2525

    
2526

    
2527
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2528
                format_override=None, verbose=False):
2529
  """Generic implementation for listing all items of a resource.
2530

2531
  @param resource: One of L{constants.QR_OP_LUXI}
2532
  @type fields: list of strings
2533
  @param fields: List of fields to query for
2534
  @type names: list of strings
2535
  @param names: Names of items to query for
2536
  @type unit: string or None
2537
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2538
    None for automatic choice (human-readable for non-separator usage,
2539
    otherwise megabytes); this is a one-letter string
2540
  @type separator: string or None
2541
  @param separator: String used to separate fields
2542
  @type header: bool
2543
  @param header: Whether to show header row
2544
  @type format_override: dict
2545
  @param format_override: Dictionary for overriding field formatting functions,
2546
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2547
  @type verbose: boolean
2548
  @param verbose: whether to use verbose field descriptions or not
2549

2550
  """
2551
  if cl is None:
2552
    cl = GetClient()
2553

    
2554
  if not names:
2555
    names = None
2556

    
2557
  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
2558

    
2559
  found_unknown = _WarnUnknownFields(response.fields)
2560

    
2561
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2562
                                     header=header,
2563
                                     format_override=format_override,
2564
                                     verbose=verbose)
2565

    
2566
  for line in data:
2567
    ToStdout(line)
2568

    
2569
  assert ((found_unknown and status == QR_UNKNOWN) or
2570
          (not found_unknown and status != QR_UNKNOWN))
2571

    
2572
  if status == QR_UNKNOWN:
2573
    return constants.EXIT_UNKNOWN_FIELD
2574

    
2575
  # TODO: Should the list command fail if not all data could be collected?
2576
  return constants.EXIT_SUCCESS
2577

    
2578

    
2579
def GenericListFields(resource, fields, separator, header, cl=None):
2580
  """Generic implementation for listing fields for a resource.
2581

2582
  @param resource: One of L{constants.QR_OP_LUXI}
2583
  @type fields: list of strings
2584
  @param fields: List of fields to query for
2585
  @type separator: string or None
2586
  @param separator: String used to separate fields
2587
  @type header: bool
2588
  @param header: Whether to show header row
2589

2590
  """
2591
  if cl is None:
2592
    cl = GetClient()
2593

    
2594
  if not fields:
2595
    fields = None
2596

    
2597
  response = cl.QueryFields(resource, fields)
2598

    
2599
  found_unknown = _WarnUnknownFields(response.fields)
2600

    
2601
  columns = [
2602
    TableColumn("Name", str, False),
2603
    TableColumn("Title", str, False),
2604
    # TODO: Add field description to master daemon
2605
    ]
2606

    
2607
  rows = [[fdef.name, fdef.title] for fdef in response.fields]
2608

    
2609
  for line in FormatTable(rows, columns, header, separator):
2610
    ToStdout(line)
2611

    
2612
  if found_unknown:
2613
    return constants.EXIT_UNKNOWN_FIELD
2614

    
2615
  return constants.EXIT_SUCCESS
2616

    
2617

    
2618
class TableColumn:
2619
  """Describes a column for L{FormatTable}.
2620

2621
  """
2622
  def __init__(self, title, fn, align_right):
2623
    """Initializes this class.
2624

2625
    @type title: string
2626
    @param title: Column title
2627
    @type fn: callable
2628
    @param fn: Formatting function
2629
    @type align_right: bool
2630
    @param align_right: Whether to align values on the right-hand side
2631

2632
    """
2633
    self.title = title
2634
    self.format = fn
2635
    self.align_right = align_right
2636

    
2637

    
2638
def _GetColFormatString(width, align_right):
2639
  """Returns the format string for a field.
2640

2641
  """
2642
  if align_right:
2643
    sign = ""
2644
  else:
2645
    sign = "-"
2646

    
2647
  return "%%%s%ss" % (sign, width)
2648

    
2649

    
2650
def FormatTable(rows, columns, header, separator):
2651
  """Formats data as a table.
2652

2653
  @type rows: list of lists
2654
  @param rows: Row data, one list per row
2655
  @type columns: list of L{TableColumn}
2656
  @param columns: Column descriptions
2657
  @type header: bool
2658
  @param header: Whether to show header row
2659
  @type separator: string or None
2660
  @param separator: String used to separate columns
2661

2662
  """
2663
  if header:
2664
    data = [[col.title for col in columns]]
2665
    colwidth = [len(col.title) for col in columns]
2666
  else:
2667
    data = []
2668
    colwidth = [0 for _ in columns]
2669

    
2670
  # Format row data
2671
  for row in rows:
2672
    assert len(row) == len(columns)
2673

    
2674
    formatted = [col.format(value) for value, col in zip(row, columns)]
2675

    
2676
    if separator is None:
2677
      # Update column widths
2678
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2679
        # Modifying a list's items while iterating is fine
2680
        colwidth[idx] = max(oldwidth, len(value))
2681

    
2682
    data.append(formatted)
2683

    
2684
  if separator is not None:
2685
    # Return early if a separator is used
2686
    return [separator.join(row) for row in data]
2687

    
2688
  if columns and not columns[-1].align_right:
2689
    # Avoid unnecessary spaces at end of line
2690
    colwidth[-1] = 0
2691

    
2692
  # Build format string
2693
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2694
                  for col, width in zip(columns, colwidth)])
2695

    
2696
  return [fmt % tuple(row) for row in data]
2697

    
2698

    
2699
def FormatTimestamp(ts):
2700
  """Formats a given timestamp.
2701

2702
  @type ts: timestamp
2703
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2704

2705
  @rtype: string
2706
  @return: a string with the formatted timestamp
2707

2708
  """
2709
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2710
    return '?'
2711
  sec, usec = ts
2712
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2713

    
2714

    
2715
def ParseTimespec(value):
2716
  """Parse a time specification.
2717

2718
  The following suffixes will be recognized:
2719

2720
    - s: seconds
2721
    - m: minutes
2722
    - h: hours
2723
    - d: day
2724
    - w: weeks
2725

2726
  Without any suffix, the value will be taken to be in seconds.
2727

2728
  """
2729
  value = str(value)
2730
  if not value:
2731
    raise errors.OpPrereqError("Empty time specification passed")
2732
  suffix_map = {
2733
    's': 1,
2734
    'm': 60,
2735
    'h': 3600,
2736
    'd': 86400,
2737
    'w': 604800,
2738
    }
2739
  if value[-1] not in suffix_map:
2740
    try:
2741
      value = int(value)
2742
    except (TypeError, ValueError):
2743
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2744
  else:
2745
    multiplier = suffix_map[value[-1]]
2746
    value = value[:-1]
2747
    if not value: # no data left after stripping the suffix
2748
      raise errors.OpPrereqError("Invalid time specification (only"
2749
                                 " suffix passed)")
2750
    try:
2751
      value = int(value) * multiplier
2752
    except (TypeError, ValueError):
2753
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2754
  return value
2755

    
2756

    
2757
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2758
                   filter_master=False):
2759
  """Returns the names of online nodes.
2760

2761
  This function will also log a warning on stderr with the names of
2762
  the offline nodes that are skipped.
2763

2764
  @param nodes: if not empty, use only this subset of nodes (minus the
2765
      offline ones)
2766
  @param cl: if not None, luxi client to use
2767
  @type nowarn: boolean
2768
  @param nowarn: by default, this function will output a note with the
2769
      offline nodes that are skipped; if this parameter is True the
2770
      note is not displayed
2771
  @type secondary_ips: boolean
2772
  @param secondary_ips: if True, return the secondary IPs instead of the
2773
      names, useful for doing network traffic over the replication interface
2774
      (if any)
2775
  @type filter_master: boolean
2776
  @param filter_master: if True, do not return the master node in the list
2777
      (useful in coordination with secondary_ips where we cannot check our
2778
      node name against the list)
2779

2780
  """
2781
  if cl is None:
2782
    cl = GetClient()
2783

    
2784
  if secondary_ips:
2785
    name_idx = 2
2786
  else:
2787
    name_idx = 0
2788

    
2789
  if filter_master:
2790
    master_node = cl.QueryConfigValues(["master_node"])[0]
2791
    filter_fn = lambda x: x != master_node
2792
  else:
2793
    filter_fn = lambda _: True
2794

    
2795
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2796
                         use_locking=False)
2797
  offline = [row[0] for row in result if row[1]]
2798
  if offline and not nowarn:
2799
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2800
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2801

    
2802

    
2803
def _ToStream(stream, txt, *args):
2804
  """Write a message to a stream, bypassing the logging system
2805

2806
  @type stream: file object
2807
  @param stream: the file to which we should write
2808
  @type txt: str
2809
  @param txt: the message
2810

2811
  """
2812
  if args:
2813
    args = tuple(args)
2814
    stream.write(txt % args)
2815
  else:
2816
    stream.write(txt)
2817
  stream.write('\n')
2818
  stream.flush()
2819

    
2820

    
2821
def ToStdout(txt, *args):
2822
  """Write a message to stdout only, bypassing the logging system
2823

2824
  This is just a wrapper over _ToStream.
2825

2826
  @type txt: str
2827
  @param txt: the message
2828

2829
  """
2830
  _ToStream(sys.stdout, txt, *args)
2831

    
2832

    
2833
def ToStderr(txt, *args):
2834
  """Write a message to stderr only, bypassing the logging system
2835

2836
  This is just a wrapper over _ToStream.
2837

2838
  @type txt: str
2839
  @param txt: the message
2840

2841
  """
2842
  _ToStream(sys.stderr, txt, *args)
2843

    
2844

    
2845
class JobExecutor(object):
2846
  """Class which manages the submission and execution of multiple jobs.
2847

2848
  Note that instances of this class should not be reused between
2849
  GetResults() calls.
2850

2851
  """
2852
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
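    """Initializes this class.

    @type cl: luxi.Client or None
    @param cl: luxi client to use; if None, a new one is created
    @type verbose: boolean
    @param verbose: whether to announce the IDs of submitted jobs
    @param opts: command line options applied to all queued opcodes
    @param feedback_fn: feedback function passed to L{PollJob}

    """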
2853
    self.queue = []
2854
    if cl is None:
2855
      cl = GetClient()
2856
    self.cl = cl
2857
    self.verbose = verbose
2858
    self.jobs = []
2859
    self.opts = opts
2860
    self.feedback_fn = feedback_fn
2861

    
2862
  def QueueJob(self, name, *ops):
2863
    """Record a job for later submit.
2864

2865
    @type name: string
2866
    @param name: a description of the job, will be used in WaitJobSet
2867
    """
2868
    SetGenericOpcodeOpts(ops, self.opts)
2869
    self.queue.append((name, ops))
2870

    
2871
  def SubmitPending(self, each=False):
2872
    """Submit all pending jobs.
2873

2874
    """
2875
    if each:
2876
      results = []
2877
      for row in self.queue:
2878
        # SubmitJob will remove the success status, but raise an exception if
2879
        # the submission fails, so we'll notice that anyway.
2880
        results.append([True, self.cl.SubmitJob(row[1])])
2881
    else:
2882
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2883
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2884
                                                            self.queue)):
2885
      self.jobs.append((idx, status, data, name))
2886

    
2887
  def _ChooseJob(self):
2888
    """Choose a non-waiting/queued job to poll next.
2889

2890
    """
2891
    assert self.jobs, "_ChooseJob called with empty job list"
2892

    
2893
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2894
    assert result
2895

    
2896
    for job_data, status in zip(self.jobs, result):
2897
      if (isinstance(status, list) and status and
2898
          status[0] in (constants.JOB_STATUS_QUEUED,
2899
                        constants.JOB_STATUS_WAITLOCK,
2900
                        constants.JOB_STATUS_CANCELING)):
2901
        # job is still present and waiting
2902
        continue
2903
      # good candidate found (either running job or lost job)
2904
      self.jobs.remove(job_data)
2905
      return job_data
2906

    
2907
    # no job found
2908
    return self.jobs.pop(0)
2909

    
2910
  def GetResults(self):
2911
    """Wait for and return the results of all jobs.
2912

2913
    @rtype: list
2914
    @return: list of tuples (success, job results), in the same order
2915
        as the submitted jobs; if a job has failed, instead of the result
2916
        there will be the error message
2917

2918
    """
2919
    if not self.jobs:
2920
      self.SubmitPending()
2921
    results = []
2922
    if self.verbose:
2923
      ok_jobs = [row[2] for row in self.jobs if row[1]]
2924
      if ok_jobs:
2925
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2926

    
2927
    # first, remove any non-submitted jobs
2928
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2929
    for idx, _, jid, name in failures:
2930
      ToStderr("Failed to submit job for %s: %s", name, jid)
2931
      results.append((idx, False, jid))
2932

    
2933
    while self.jobs:
2934
      (idx, _, jid, name) = self._ChooseJob()
2935
      ToStdout("Waiting for job %s for %s...", jid, name)
2936
      try:
2937
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2938
        success = True
2939
      except errors.JobLost, err:
2940
        _, job_result = FormatError(err)
2941
        ToStderr("Job %s for %s has been archived, cannot check its result",
2942
                 jid, name)
2943
        success = False
2944
      except (errors.GenericError, luxi.ProtocolError), err:
2945
        _, job_result = FormatError(err)
2946
        success = False
2947
        # the error message will always be shown, verbose or not
2948
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2949

    
2950
      results.append((idx, success, job_result))
2951

    
2952
    # sort based on the index, then drop it
2953
    results.sort()
2954
    results = [i[1:] for i in results]
2955

    
2956
    return results
2957

    
2958
  def WaitOrShow(self, wait):
2959
    """Wait for job results or only print the job IDs.
2960

2961
    @type wait: boolean
2962
    @param wait: whether to wait or not
2963

2964
    """
2965
    if wait:
2966
      return self.GetResults()
2967
    else:
2968
      if not self.jobs:
2969
        self.SubmitPending()
2970
      for _, status, result, name in self.jobs:
2971
        if status:
2972
          ToStdout("%s: %s", result, name)
2973
        else:
2974
          ToStderr("Failure for %s: %s", name, result)
2975
      return [row[1:3] for row in self.jobs]
2976

    
2977

    
2978
def FormatParameterDict(buf, param_dict, actual, level=1):
2979
  """Formats a parameter dictionary.
2980

2981
  @type buf: L{StringIO}
2982
  @param buf: the buffer into which to write
2983
  @type param_dict: dict
2984
  @param param_dict: the own parameters
2985
  @type actual: dict
2986
  @param actual: the current parameter set (including defaults)
2987
  @param level: Level of indent
2988

2989
  """
2990
  indent = "  " * level
2991
  for key in sorted(actual):
2992
    val = param_dict.get(key, "default (%s)" % actual[key])
2993
    buf.write("%s- %s: %s\n" % (indent, key, val))
2994

    
2995

    
2996
def ConfirmOperation(names, list_type, text, extra=""):
2997
  """Ask the user to confirm an operation on a list of list_type.
2998

2999
  This function is used to request confirmation for doing an operation
3000
  on a given list of list_type.
3001

3002
  @type names: list
3003
  @param names: the list of names that we display when
3004
      we ask for confirmation
3005
  @type list_type: str
3006
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3007
  @type text: str
3008
  @param text: the operation that the user should confirm
3009
  @rtype: boolean
3010
  @return: True or False depending on user's confirmation.
3011

3012
  """
3013
  count = len(names)
3014
  msg = ("The %s will operate on %d %s.\n%s"
3015
         "Do you want to continue?" % (text, count, list_type, extra))
3016
  affected = (("\nAffected %s:\n" % list_type) +
3017
              "\n".join(["  %s" % name for name in names]))
3018

    
3019
  choices = [("y", True, "Yes, execute the %s" % text),
3020
             ("n", False, "No, abort the %s" % text)]
3021

    
3022
  if count > 20:
3023
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3024
    question = msg
3025
  else:
3026
    question = msg + affected
3027

    
3028
  choice = AskUser(question, choices)
3029
  if choice == "v":
3030
    choices.pop(1)
3031
    choice = AskUser(msg + affected, choices)
3032
  return choice