# lib/cli.py @ 18009c1e
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
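
# Illustrative sketch (not part of the original module): given the prefixes
# defined above (NO_PREFIX = "no_", UN_PREFIX = "-"), _SplitKeyVal would parse
# a hypothetical value like this:
#   _SplitKeyVal("--disk", "size=512,no_backup,-cache")
#   -> {"size": "512", "backup": False, "cache": None}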


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
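
# Illustrative sketch (not part of the original module): with type="identkeyval"
# and action="append" (as used by NET_OPT below), example values would parse as:
#   "0:ip=10.0.0.5,mode=bridged" -> ("0", {"ip": "10.0.0.5", "mode": "bridged"})
#   "no_0"                       -> ("0", False)
# i.e. an identifier plus its key/value dict, or False/None when removing.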


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
                         default={}, dest="hvparams",
                         help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added "
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
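
# Illustrative sketch (not part of the original module), with made-up node names:
#   SplitNodeOption("node1.example.com:node2.example.com")
#   -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)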


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
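
# Illustrative sketch (not part of the original module), using a made-up OS name:
#   CalculateOSNames("debootstrap", ["minimal", "default"])
#   -> ["debootstrap+minimal", "debootstrap+default"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]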


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
1421

    
1422

    
1423
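# Example (illustrative only): asking a question with custom choices; the
# last entry is the default used when no tty is available, and '?' is
# reserved for displaying the per-choice help text:
#
#   choices = [("y", True, "Perform the operation"),
#              ("a", "all", "Perform the operation on all items"),
#              ("n", False, "Do not perform the operation")]
#   answer = AskUser("Continue with the operation?", choices)

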
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


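# Typical flow tying the helpers above together (illustrative only; "op" is
# a placeholder for any opcode instance and "opts" for parsed command line
# options):
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)       # submit without waiting
#   results = PollJob(job_id, cl=cl)    # wait for and collect the results
#
# or, equivalently, in one call that also honours the '--submit' option:
#
#   result = SubmitOrSend(op, opts, cl=cl)

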
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')


def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics


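# Example (illustrative only; the indices and parameter values stand in for
# what the "--net" option parser would produce, assuming "link" and "mode"
# are valid NIC parameters):
#
#   ParseNicOption([("0", {"link": "br0"}), ("1", {"mode": "routed"})])
#     returns [{"link": "br0"}, {"mode": "routed"}]

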
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management.

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Formats a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list
  @return: list of strings, each being one formatted line of the table

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


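# Example (illustrative only; the field names and data are placeholders):
#
#   GenerateTable(headers={"name": "Node", "dfree": "DFree"},
#                 fields=["name", "dfree"],
#                 separator=None,
#                 data=[["node1", 102400], ["node2", 204800]],
#                 numfields=["dfree"], unitfields=["dfree"])
#
# returns the formatted lines with "dfree" right-aligned and converted via
# utils.FormatUnit, ready to be passed to ToStdout line by line.

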
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status

    """
    self._fn = fn
    self._status_fn = status_fn

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.QRFS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    if status == constants.QRFS_UNKNOWN:
      return "(unknown)"

    if status == constants.QRFS_NODATA:
      return "(nodata)"

    if status == constants.QRFS_UNAVAIL:
      return "(unavail)"

    if status == constants.QRFS_OFFLINE:
      return "(offline)"

    raise NotImplementedError("Unknown status %s" % status)


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.QRFS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.QRFS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.QRFS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.QRFS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}

  """
  if cl is None:
    cl = GetClient()

  if not names:
    names = None

  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    # TODO: Add field description to master daemon
    ]

  rows = [[fdef.name, fdef.title] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


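# Example (illustrative only; the column definitions and data are
# placeholders):
#
#   columns = [TableColumn("Name", str, False),
#              TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True)]
#   for line in FormatTable([["disk0", 1024]], columns, True, None):
#     ToStdout(line)

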
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value


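# Example (illustrative only):
#
#   ParseTimespec("30")   returns 30      (plain value, taken as seconds)
#   ParseTimespec("2h")   returns 7200
#   ParseTimespec("1w")   returns 604800

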
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  any offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  if args:
    args = tuple(args)
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
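

# Example (illustrative only; the job names and opcodes are placeholders):
#
#   jex = JobExecutor(opts=opts)
#   jex.QueueJob("instance1.example.com", op_stop)
#   jex.QueueJob("instance2.example.com", op_stop)
#   for (success, result) in jex.GetResults():
#     ...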