#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
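
# Illustrative note (not part of the original module): the "unit" option type
# below relies on utils.ParseUnit, which accepts plain numbers and suffixed
# values and normalises them to MiB, so (assuming that behaviour) an option
# declared with type="unit" would turn "512" into 512 and "4g" into 4096,
# while an unparseable value raises OptionValueError.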


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key: val, key: val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict

    
491

    
492
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
493
  """Custom parser for ident:key=val,key=val options.
494

495
  This will store the parsed values as a tuple (ident, {key: val}). As such,
496
  multiple uses of this option via action=append is possible.
497

498
  """
499
  if ":" not in value:
500
    ident, rest = value, ''
501
  else:
502
    ident, rest = value.split(":", 1)
503

    
504
  if ident.startswith(NO_PREFIX):
505
    if rest:
506
      msg = "Cannot pass options when removing parameter groups: %s" % value
507
      raise errors.ParameterError(msg)
508
    retval = (ident[len(NO_PREFIX):], False)
509
  elif ident.startswith(UN_PREFIX):
510
    if rest:
511
      msg = "Cannot pass options when removing parameter groups: %s" % value
512
      raise errors.ParameterError(msg)
513
    retval = (ident[len(UN_PREFIX):], None)
514
  else:
515
    kv_dict = _SplitKeyVal(opt, rest)
516
    retval = (ident, kv_dict)
517
  return retval
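
# Illustrative example (not part of the original module): an option declared
# with type="identkeyval" (e.g. "--net" below) turns "0:link=br0,mode=bridged"
# into ("0", {"link": "br0", "mode": "bridged"}), while a bare "no_link0"
# becomes ("link0", False), i.e. a request to remove that parameter group.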


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
                         default={}, dest="hvparams",
                         help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names, that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# Common options for creating instances. "add" and "import" then add their
# own specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
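
# Illustrative sketch (not part of the original module): each value in the
# "commands" dictionary consumed by _ParseArgs is a 5-tuple of
# (function, argument definition, options, usage, description), e.g.
#
#   commands = {
#     "list": (PrintTags, ARGS_NONE, [TAG_SRC_OPT], "", "List the tags"),
#     }
#
# where PrintTags is a hypothetical handler taking (opts, args).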


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
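
# Illustrative example (not part of the original module): with an argument
# definition of ARGS_ONE_INSTANCE (min=1, max=1) the checks above accept
# exactly one value, rejecting both [] and ["inst1", "inst2"], while
# ARGS_MANY_NODES (min=0, max=None) accepts any number of values.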


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
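
# Illustrative example (not part of the original module):
# SplitNodeOption("node1.example.com:node2.example.com") yields the pair
# ("node1.example.com", "node2.example.com"), whereas
# SplitNodeOption("node1.example.com") yields ("node1.example.com", None).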


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
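
# Illustrative example (not part of the original module):
# CalculateOSNames("debootstrap", ["wheezy", "squeeze"]) returns
# ["debootstrap+wheezy", "debootstrap+squeeze"]; an OS without variants is
# returned as ["debootstrap"].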


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
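
# Illustrative example (not part of the original module): with default fields
# ["name", "status"], ParseFields(None, default) keeps the defaults,
# ParseFields("+oper_ram", default) appends to them and
# ParseFields("name,pnode", default) replaces them entirely.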


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
1379
      entry from the list
1380

1381
  """
1382
  if choices is None:
1383
    choices = [('y', True, 'Perform the operation'),
1384
               ('n', False, 'Do not perform the operation')]
1385
  if not choices or not isinstance(choices, list):
1386
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1387
  for entry in choices:
1388
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1389
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1390

    
1391
  answer = choices[-1][1]
1392
  new_text = []
1393
  for line in text.splitlines():
1394
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1395
  text = "\n".join(new_text)
1396
  try:
1397
    f = file("/dev/tty", "a+")
1398
  except IOError:
1399
    return answer
1400
  try:
1401
    chars = [entry[0] for entry in choices]
1402
    chars[-1] = "[%s]" % chars[-1]
1403
    chars.append('?')
1404
    maps = dict([(entry[0], entry[1]) for entry in choices])
1405
    while True:
1406
      f.write(text)
1407
      f.write('\n')
1408
      f.write("/".join(chars))
1409
      f.write(": ")
1410
      line = f.readline(2).strip().lower()
1411
      if line in maps:
1412
        answer = maps[line]
1413
        break
1414
      elif line == '?':
1415
        for entry in choices:
1416
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1417
        f.write("\n")
1418
        continue
1419
  finally:
1420
    f.close()
1421
  return answer
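
# Example (illustrative only, not part of the original module): a plain
# confirmation with the default y/n choices, and a prompt with custom
# (input_char, return_value, description) tuples:
#   if not AskUser("Continue with the operation?"):
#     return 1
#   choice = AskUser("What should be done?",
#                    [("r", "restart", "Restart the instance"),
#                     ("s", "stop", "Stop the instance"),
#                     ("c", None, "Cancel")])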


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
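
# Example (illustrative only, not part of the original module): the usual
# submit-and-wait flow; "op" stands for any opcode instance built by the
# caller.
#   result = SubmitOpCode(op, opts=opts)
# which is roughly equivalent to:
#   job_id = SendJob([op], cl=cl)
#   result = PollJob(job_id, cl=cl)[0]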


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
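
# Example (illustrative only, not part of the original module): command
# implementations typically end with
#   SubmitOrSend(op, opts)
#   return 0
# and let GenericMain catch the JobSubmittedException that is raised when
# --submit was given.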


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
               " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
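
# Example (illustrative only, not part of the original module): converting an
# exception into an exit code plus printable message, as GenericMain does:
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     result, err_msg = FormatError(err)
#     ToStderr(err_msg)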


def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result
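
# Example (illustrative only, not part of the original module; the exact
# per-command tuple layout is defined by the command line handling design
# doc): a gnt-* script usually builds a "commands" dictionary and hands it
# to GenericMain from its main entry point, e.g.
#   commands = {
#     "list": (ListItems, ARGS_NONE, [NOHDR_OPT], "", "Lists the items"),
#     }
#   sys.exit(GenericMain(commands))
# where "ListItems" is a hypothetical command function.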


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
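
# Example (illustrative only, not part of the original module): the option
# parser hands over (index, settings-dict) pairs, so a command line roughly
# like "--net 0:link=br0 --net 1:mode=routed" would end up as
#   ParseNicOption([("0", {"link": "br0"}), ("1", {"mode": "routed"})])
#     -> [{"link": "br0"}, {"mode": "routed"}]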


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)
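
# Example (illustrative only, not part of the original module): the callback
# receives the helper object as its first argument and can run commands
# through it ("some-maintenance-command" is purely hypothetical):
#   def _Maintenance(helper):
#     helper._RunCmd(None, ["some-maintenance-command"])  # runs on the master
#   RunWhileClusterStopped(ToStdout, _Maintenance)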


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
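
# Example (illustrative only, not part of the original module):
#   headers = {"name": "Node", "mfree": "MemFree"}
#   data = [["node1", 1024], ["node2", 4096]]
#   for line in GenerateTable(headers, ["name", "mfree"], None, data,
#                             numfields=["mfree"], unitfields=["mfree"]):
#     ToStdout(line)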


def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status

    """
    self._fn = fn
    self._status_fn = status_fn

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.QRFS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    if status == constants.QRFS_UNKNOWN:
      return "(unknown)"

    if status == constants.QRFS_NODATA:
      return "(nodata)"

    if status == constants.QRFS_UNAVAIL:
      return "(unavail)"

    if status == constants.QRFS_OFFLINE:
      return "(offline)"

    raise NotImplementedError("Unknown status %s" % status)


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.QRFS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.QRFS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.QRFS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.QRFS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}

  """
  if cl is None:
    cl = GetClient()

  if not names:
    names = None

  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
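
# Example (illustrative only, not part of the original module; assumes a luxi
# query resource constant such as constants.QR_NODE and the usual option
# destinations of the gnt-* scripts):
#   return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"], args,
#                      opts.units, opts.separator, not opts.no_headers)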


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    # TODO: Add field description to master daemon
    ]

  rows = [[fdef.name, fdef.title] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
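
# Example (illustrative only, not part of the original module):
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Size", str, True),
#     ]
#   rows = [["disk0", "10240"], ["disk1", "512"]]
#   for line in FormatTable(rows, columns, True, None):
#     ToStdout(line)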


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
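
# Example (illustrative only, not part of the original module):
#   ParseTimespec("30")  -> 30
#   ParseTimespec("2h")  -> 7200
#   ParseTimespec("1w")  -> 604800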


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used when reporting
        the job's results
    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob only returns the job ID, but raises an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
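
# Example (illustrative only, not part of the original module): batching
# several jobs; "_BuildOpcode" stands for any caller-provided helper that
# returns an opcode for an item.
#   jex = JobExecutor(cl=cl, opts=opts)
#   for name in names:
#     jex.QueueJob(name, _BuildOpcode(name))
#   results = jex.GetResults()   # list of (success, result) tuples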