Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ acd19189

History | View | Annotate | Download (93.9 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
from cStringIO import StringIO
31

    
32
from ganeti import utils
33
from ganeti import errors
34
from ganeti import constants
35
from ganeti import opcodes
36
from ganeti import luxi
37
from ganeti import ssconf
38
from ganeti import rpc
39
from ganeti import ssh
40
from ganeti import compat
41
from ganeti import netutils
42
from ganeti import qlang
43

    
44
from optparse import (OptionParser, TitledHelpFormatter,
45
                      Option, OptionValueError)
46

    
47

    
48
__all__ = [
49
  # Command line options
50
  "ADD_UIDS_OPT",
51
  "ALLOCATABLE_OPT",
52
  "ALLOC_POLICY_OPT",
53
  "ALL_OPT",
54
  "AUTO_PROMOTE_OPT",
55
  "AUTO_REPLACE_OPT",
56
  "BACKEND_OPT",
57
  "BLK_OS_OPT",
58
  "CAPAB_MASTER_OPT",
59
  "CAPAB_VM_OPT",
60
  "CLEANUP_OPT",
61
  "CLUSTER_DOMAIN_SECRET_OPT",
62
  "CONFIRM_OPT",
63
  "CP_SIZE_OPT",
64
  "DEBUG_OPT",
65
  "DEBUG_SIMERR_OPT",
66
  "DISKIDX_OPT",
67
  "DISK_OPT",
68
  "DISK_TEMPLATE_OPT",
69
  "DRAINED_OPT",
70
  "DRY_RUN_OPT",
71
  "DRBD_HELPER_OPT",
72
  "EARLY_RELEASE_OPT",
73
  "ENABLED_HV_OPT",
74
  "ERROR_CODES_OPT",
75
  "FIELDS_OPT",
76
  "FILESTORE_DIR_OPT",
77
  "FILESTORE_DRIVER_OPT",
78
  "FORCE_OPT",
79
  "FORCE_VARIANT_OPT",
80
  "GLOBAL_FILEDIR_OPT",
81
  "HID_OS_OPT",
82
  "HVLIST_OPT",
83
  "HVOPTS_OPT",
84
  "HYPERVISOR_OPT",
85
  "IALLOCATOR_OPT",
86
  "DEFAULT_IALLOCATOR_OPT",
87
  "IDENTIFY_DEFAULTS_OPT",
88
  "IGNORE_CONSIST_OPT",
89
  "IGNORE_FAILURES_OPT",
90
  "IGNORE_OFFLINE_OPT",
91
  "IGNORE_REMOVE_FAILURES_OPT",
92
  "IGNORE_SECONDARIES_OPT",
93
  "IGNORE_SIZE_OPT",
94
  "INTERVAL_OPT",
95
  "MAC_PREFIX_OPT",
96
  "MAINTAIN_NODE_HEALTH_OPT",
97
  "MASTER_NETDEV_OPT",
98
  "MC_OPT",
99
  "MIGRATION_MODE_OPT",
100
  "NET_OPT",
101
  "NEW_CLUSTER_CERT_OPT",
102
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
103
  "NEW_CONFD_HMAC_KEY_OPT",
104
  "NEW_RAPI_CERT_OPT",
105
  "NEW_SECONDARY_OPT",
106
  "NIC_PARAMS_OPT",
107
  "NODE_LIST_OPT",
108
  "NODE_PLACEMENT_OPT",
109
  "NODEGROUP_OPT",
110
  "NODE_PARAMS_OPT",
111
  "NODE_POWERED_OPT",
112
  "NODRBD_STORAGE_OPT",
113
  "NOHDR_OPT",
114
  "NOIPCHECK_OPT",
115
  "NO_INSTALL_OPT",
116
  "NONAMECHECK_OPT",
117
  "NOLVM_STORAGE_OPT",
118
  "NOMODIFY_ETCHOSTS_OPT",
119
  "NOMODIFY_SSH_SETUP_OPT",
120
  "NONICS_OPT",
121
  "NONLIVE_OPT",
122
  "NONPLUS1_OPT",
123
  "NOSHUTDOWN_OPT",
124
  "NOSTART_OPT",
125
  "NOSSH_KEYCHECK_OPT",
126
  "NOVOTING_OPT",
127
  "NWSYNC_OPT",
128
  "ON_PRIMARY_OPT",
129
  "ON_SECONDARY_OPT",
130
  "OFFLINE_OPT",
131
  "OSPARAMS_OPT",
132
  "OS_OPT",
133
  "OS_SIZE_OPT",
134
  "PREALLOC_WIPE_DISKS_OPT",
135
  "PRIMARY_IP_VERSION_OPT",
136
  "PRIORITY_OPT",
137
  "RAPI_CERT_OPT",
138
  "READD_OPT",
139
  "REBOOT_TYPE_OPT",
140
  "REMOVE_INSTANCE_OPT",
141
  "REMOVE_UIDS_OPT",
142
  "RESERVED_LVS_OPT",
143
  "ROMAN_OPT",
144
  "SECONDARY_IP_OPT",
145
  "SELECT_OS_OPT",
146
  "SEP_OPT",
147
  "SHOWCMD_OPT",
148
  "SHUTDOWN_TIMEOUT_OPT",
149
  "SINGLE_NODE_OPT",
150
  "SRC_DIR_OPT",
151
  "SRC_NODE_OPT",
152
  "SUBMIT_OPT",
153
  "STATIC_OPT",
154
  "SYNC_OPT",
155
  "TAG_SRC_OPT",
156
  "TIMEOUT_OPT",
157
  "UIDPOOL_OPT",
158
  "USEUNITS_OPT",
159
  "USE_REPL_NET_OPT",
160
  "VERBOSE_OPT",
161
  "VG_NAME_OPT",
162
  "YES_DOIT_OPT",
163
  # Generic functions for CLI programs
164
  "GenericMain",
165
  "GenericInstanceCreate",
166
  "GenericList",
167
  "GenericListFields",
168
  "GetClient",
169
  "GetOnlineNodes",
170
  "JobExecutor",
171
  "JobSubmittedException",
172
  "ParseTimespec",
173
  "RunWhileClusterStopped",
174
  "SubmitOpCode",
175
  "SubmitOrSend",
176
  "UsesRPC",
177
  # Formatting functions
178
  "ToStderr", "ToStdout",
179
  "FormatError",
180
  "FormatQueryResult",
181
  "FormatParameterDict",
182
  "GenerateTable",
183
  "AskUser",
184
  "FormatTimestamp",
185
  "FormatLogMessage",
186
  # Tags functions
187
  "ListTags",
188
  "AddTags",
189
  "RemoveTags",
190
  # command line options support infrastructure
191
  "ARGS_MANY_INSTANCES",
192
  "ARGS_MANY_NODES",
193
  "ARGS_MANY_GROUPS",
194
  "ARGS_NONE",
195
  "ARGS_ONE_INSTANCE",
196
  "ARGS_ONE_NODE",
197
  "ARGS_ONE_GROUP",
198
  "ARGS_ONE_OS",
199
  "ArgChoice",
200
  "ArgCommand",
201
  "ArgFile",
202
  "ArgGroup",
203
  "ArgHost",
204
  "ArgInstance",
205
  "ArgJobId",
206
  "ArgNode",
207
  "ArgOs",
208
  "ArgSuggest",
209
  "ArgUnknown",
210
  "OPT_COMPL_INST_ADD_NODES",
211
  "OPT_COMPL_MANY_NODES",
212
  "OPT_COMPL_ONE_IALLOCATOR",
213
  "OPT_COMPL_ONE_INSTANCE",
214
  "OPT_COMPL_ONE_NODE",
215
  "OPT_COMPL_ONE_NODEGROUP",
216
  "OPT_COMPL_ONE_OS",
217
  "cli_option",
218
  "SplitNodeOption",
219
  "CalculateOSNames",
220
  "ParseFields",
221
  "COMMON_CREATE_OPTS",
222
  ]
223

    
224
# Prefixes recognized when parsing key=value option strings (see
# _SplitKeyVal): "no_foo" means foo=False, "-foo" means reset foo to default.
NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted from lowest to highest)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
243

    
244

    
245
class _Argument:
246
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
247
    self.min = min
248
    self.max = max
249

    
250
  def __repr__(self):
251
    return ("<%s min=%s max=%s>" %
252
            (self.__class__.__name__, self.min, self.max))
253

    
254

    
255
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor; the choices
  are only suggestions, not a restriction.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    # suggested (not enforced) values for this argument
    self.choices = choices

  def __repr__(self):
    fmt = "<%s min=%s max=%s choices=%r>"
    return fmt % (self.__class__.__name__, self.min, self.max, self.choices)
269

    
270

    
271
class ArgChoice(ArgSuggest):
  """Choice argument.

  Like L{ArgSuggest}, but the value must be one of the choices passed
  to the constructor.

  """
278

    
279

    
280
class ArgUnknown(_Argument):
  """Argument whose kind is not known statically (e.g. determined at runtime).

  """
284

    
285

    
286
class ArgInstance(_Argument):
  """Argument naming one or more instances.

  """
290

    
291

    
292
class ArgNode(_Argument):
  """Argument naming a node.

  """
296

    
297

    
298
class ArgGroup(_Argument):
  """Argument naming a node group.

  """
302

    
303

    
304
class ArgJobId(_Argument):
  """Argument holding a job ID.

  """
308

    
309

    
310
class ArgFile(_Argument):
  """Argument holding a file path.

  """
314

    
315

    
316
class ArgCommand(_Argument):
  """Argument holding a command to run.

  """
320

    
321

    
322
class ArgHost(_Argument):
  """Argument holding a host name.

  """
326

    
327

    
328
class ArgOs(_Argument):
  """Argument naming an OS.

  """
332

    
333

    
334
# Common positional-argument specifications for commands
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# FIX: this was previously ArgInstance, which made single-group commands
# validate and shell-complete against instance names instead of node groups
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
342

    
343

    
344
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command-line options, must carry a C{tag_type} field
  @param args: positional arguments; for node/instance tags the first
      element is consumed as the object name
  @return: a (kind, name) tuple

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton; its name is the kind itself
    return kind, kind
  if kind in (constants.TAG_NODE, constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    # consume the object name from the argument list
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
363

    
364

    
365
def _ExtendTags(opts, args):
366
  """Extend the args if a source file has been given.
367

368
  This function will extend the tags with the contents of the file
369
  passed in the 'tags_source' attribute of the opts parameter. A file
370
  named '-' will be replaced by stdin.
371

372
  """
373
  fname = opts.tags_source
374
  if fname is None:
375
    return
376
  if fname == "-":
377
    new_fh = sys.stdin
378
  else:
379
    new_fh = open(fname, "r")
380
  new_data = []
381
  try:
382
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
383
    # because of python bug 1633941
384
    while True:
385
      line = new_fh.readline()
386
      if not line:
387
        break
388
      new_data.append(line.strip())
389
  finally:
390
    new_fh.close()
391
  args.extend(new_data)
392

    
393

    
394
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  client = GetClient()
  # print the tags sorted, one per line
  for tag in sorted(client.QueryTags(kind, name)):
    ToStdout(tag)
410

    
411

    
412
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull in extra tags from --from, if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
427

    
428

    
429
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # pull in extra tags from --from, if given
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
444

    
445

    
446
def check_unit(option, opt, value): # pylint: disable-msg=W0613
447
  """OptParsers custom converter for units.
448

449
  """
450
  try:
451
    return utils.ParseUnit(value)
452
  except errors.UnitParseError, err:
453
    raise OptionValueError("option %s: %s" % (opt, err))
454

    
455

    
456
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  parsed = {}
  if not data:
    return parsed
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      key, val = elem.split("=", 1)
    elif elem.startswith(NO_PREFIX):
      # "no_foo" -> foo=False
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      # "-foo" -> foo=None (reset to default)
      key, val = elem[len(UN_PREFIX):], None
    else:
      # bare "foo" -> foo=True
      key, val = elem, True
    if key in parsed:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    parsed[key] = val
  return parsed
491

    
492

    
493
def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" in value:
    ident, rest = value.split(":", 1)
  else:
    ident, rest = value, ''

  # "no_ident" removes the group, "-ident" resets it to the default;
  # neither form may carry key=val options
  for prefix, flag in ((NO_PREFIX, False), (UN_PREFIX, None)):
    if ident.startswith(prefix):
      if rest:
        msg = "Cannot pass options when removing parameter groups: %s" % value
        raise errors.ParameterError(msg)
      return (ident[len(prefix):], flag)

  return (ident, _SplitKeyVal(opt, rest))
519

    
520

    
521
def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  kv_dict = _SplitKeyVal(opt, value)
  return kv_dict
528

    
529

    
530
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  lowered = value.lower()
  if lowered in (constants.VALUE_FALSE, "no"):
    return False
  if lowered in (constants.VALUE_TRUE, "yes"):
    return True
  # note: the message intentionally shows the lowercased input
  raise errors.ParameterError("Invalid boolean value '%s'" % lowered)
543

    
544

    
545
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
# Each constant marks what kind of object an option's value should be
# dynamically completed against (presumably consumed by the shell-completion
# scripts -- confirm against their generator).
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: All dynamic-completion markers, useful for membership checks
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
564

    
565

    
566
class CliOption(Option):
  """Custom option class for optparse.

  Extends the stock optparse Option with Ganeti-specific value types
  and a completion-suggestion attribute.

  """
  # extra option attribute carrying dynamic-completion hints
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # additional value types understood by this option class
  TYPES = Option.TYPES + ("identkeyval", "keyval", "unit", "bool")
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER.update({
    "identkeyval": check_ident_key_val,
    "keyval": check_key_val,
    "unit": check_unit,
    "bool": check_bool,
    })
584

    
585

    
586
# optparse.py sets make_option, so we do it for our own option class, too
587
cli_option = CliOption
588

    
589

    
590
_YORNO = "yes|no"
591

    
592
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
593
                       help="Increase debugging level")
594

    
595
NOHDR_OPT = cli_option("--no-headers", default=False,
596
                       action="store_true", dest="no_headers",
597
                       help="Don't display column headers")
598

    
599
SEP_OPT = cli_option("--separator", default=None,
600
                     action="store", dest="separator",
601
                     help=("Separator between output fields"
602
                           " (defaults to one space)"))
603

    
604
USEUNITS_OPT = cli_option("--units", default=None,
605
                          dest="units", choices=('h', 'm', 'g', 't'),
606
                          help="Specify units for output (one of h/m/g/t)")
607

    
608
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
609
                        type="string", metavar="FIELDS",
610
                        help="Comma separated list of output fields")
611

    
612
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
613
                       default=False, help="Force the operation")
614

    
615
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
616
                         default=False, help="Do not require confirmation")
617

    
618
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
619
                                  action="store_true", default=False,
620
                                  help=("Ignore offline nodes and do as much"
621
                                        " as possible"))
622

    
623
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
624
                         default=None, help="File with tag names")
625

    
626
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
627
                        default=False, action="store_true",
628
                        help=("Submit the job and return the job ID, but"
629
                              " don't wait for the job to finish"))
630

    
631
SYNC_OPT = cli_option("--sync", dest="do_locking",
632
                      default=False, action="store_true",
633
                      help=("Grab locks while doing the queries"
634
                            " in order to ensure more consistent results"))
635

    
636
# FIX: help text had a doubled word ("verify it it could be")
DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))
641

    
642
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
643
                         action="store_true",
644
                         help="Increase the verbosity of the operation")
645

    
646
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
647
                              action="store_true", dest="simulate_errors",
648
                              help="Debugging option that makes the operation"
649
                              " treat most runtime checks as failed")
650

    
651
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
652
                        default=True, action="store_false",
653
                        help="Don't wait for sync (DANGEROUS!)")
654

    
655
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
656
                               help="Custom disk setup (diskless, file,"
657
                               " plain or drbd)",
658
                               default=None, metavar="TEMPL",
659
                               choices=list(constants.DISK_TEMPLATES))
660

    
661
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
662
                        help="Do not create any network cards for"
663
                        " the instance")
664

    
665
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
666
                               help="Relative path under default cluster-wide"
667
                               " file storage dir to store file-based disks",
668
                               default=None, metavar="<DIR>")
669

    
670
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
671
                                  help="Driver to use for image files",
672
                                  default="loop", metavar="<DRIVER>",
673
                                  choices=list(constants.FILE_DRIVER))
674

    
675
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
676
                            help="Select nodes for the instance automatically"
677
                            " using the <NAME> iallocator plugin",
678
                            default=None, type="string",
679
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
680

    
681
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
682
                            metavar="<NAME>",
683
                            help="Set the default instance allocator plugin",
684
                            default=None, type="string",
685
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
686

    
687
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
688
                    metavar="<os>",
689
                    completion_suggest=OPT_COMPL_ONE_OS)
690

    
691
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
692
                         type="keyval", default={},
693
                         help="OS parameters")
694

    
695
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
696
                               action="store_true", default=False,
697
                               help="Force an unknown variant")
698

    
699
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
700
                            action="store_true", default=False,
701
                            help="Do not install the OS (will"
702
                            " enable no-start)")
703

    
704
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
705
                         type="keyval", default={},
706
                         help="Backend parameters")
707

    
708
HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
709
                         default={}, dest="hvparams",
710
                         help="Hypervisor parameters")
711

    
712
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
713
                            help="Hypervisor and hypervisor options, in the"
714
                            " format hypervisor:option=value,option=value,...",
715
                            default=None, type="identkeyval")
716

    
717
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
718
                        help="Hypervisor and hypervisor options, in the"
719
                        " format hypervisor:option=value,option=value,...",
720
                        default=[], action="append", type="identkeyval")
721

    
722
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
723
                           action="store_false",
724
                           help="Don't check that the instance's IP"
725
                           " is alive")
726

    
727
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
728
                             default=True, action="store_false",
729
                             help="Don't check that the instance's name"
730
                             " is resolvable")
731

    
732
NET_OPT = cli_option("--net",
733
                     help="NIC parameters", default=[],
734
                     dest="nics", action="append", type="identkeyval")
735

    
736
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
737
                      dest="disks", action="append", type="identkeyval")
738

    
739
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
740
                         help="Comma-separated list of disks"
741
                         " indices to act on (e.g. 0,2) (optional,"
742
                         " defaults to all disks)")
743

    
744
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
745
                         help="Enforces a single-disk configuration using the"
746
                         " given disk size, in MiB unless a suffix is used",
747
                         default=None, type="unit", metavar="<size>")
748

    
749
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
750
                                dest="ignore_consistency",
751
                                action="store_true", default=False,
752
                                help="Ignore the consistency of the disks on"
753
                                " the secondary")
754

    
755
NONLIVE_OPT = cli_option("--non-live", dest="live",
756
                         default=True, action="store_false",
757
                         help="Do a non-live migration (this usually means"
758
                         " freeze the instance, save the state, transfer and"
759
                         " only then resume running on the secondary node)")
760

    
761
# FIX: help text had an unclosed parenthesis ("(choose either live or non-live")
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")
766

    
767
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
768
                                help="Target node and optional secondary node",
769
                                metavar="<pnode>[:<snode>]",
770
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
771

    
772
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
773
                           action="append", metavar="<node>",
774
                           help="Use only this node (can be used multiple"
775
                           " times, if not given defaults to all nodes)",
776
                           completion_suggest=OPT_COMPL_ONE_NODE)
777

    
778
NODEGROUP_OPT = cli_option("-g", "--node-group",
779
                           dest="nodegroup",
780
                           help="Node group (name or uuid)",
781
                           metavar="<nodegroup>",
782
                           default=None, type="string",
783
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
784

    
785
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
786
                             metavar="<node>",
787
                             completion_suggest=OPT_COMPL_ONE_NODE)
788

    
789
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
790
                         action="store_false",
791
                         help="Don't start the instance after creation")
792

    
793
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
794
                         action="store_true", default=False,
795
                         help="Show command instead of executing it")
796

    
797
# FIX: help text had a doubled space ("and  disrupt") and an unclosed
# parenthesis ("(like during the migration")
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")
805

    
806
STATIC_OPT = cli_option("-s", "--static", dest="static",
807
                        action="store_true", default=False,
808
                        help="Only show configuration data, not runtime data")
809

    
810
ALL_OPT = cli_option("--all", dest="show_all",
811
                     default=False, action="store_true",
812
                     help="Show info on all instances on the cluster."
813
                     " This can take a long time to run, use wisely")
814

    
815
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
816
                           action="store_true", default=False,
817
                           help="Interactive OS reinstall, lists available"
818
                           " OS templates for selection")
819

    
820
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
821
                                 action="store_true", default=False,
822
                                 help="Remove the instance from the cluster"
823
                                 " configuration even if there are failures"
824
                                 " during the removal process")
825

    
826
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
827
                                        dest="ignore_remove_failures",
828
                                        action="store_true", default=False,
829
                                        help="Remove the instance from the"
830
                                        " cluster configuration even if there"
831
                                        " are failures during the removal"
832
                                        " process")
833

    
834
# Options for instance removal, disk replacement/activation and node
# (re-)addition.  All of these are plain boolean/string flags.

REMOVE_INSTANCE_OPT = cli_option(
  "--remove-instance", dest="remove_instance",
  action="store_true", default=False,
  help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option(
  "-n", "--new-secondary", dest="dst_node",
  metavar="NODE", default=None,
  completion_suggest=OPT_COMPL_ONE_NODE,
  help="Specifies the new secondary node")

ON_PRIMARY_OPT = cli_option(
  "-p", "--on-primary", dest="on_primary",
  action="store_true", default=False,
  help="Replace the disk(s) on the primary"
       " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option(
  "-s", "--on-secondary", dest="on_secondary",
  action="store_true", default=False,
  help="Replace the disk(s) on the secondary"
       " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option(
  "--auto-promote", dest="auto_promote",
  action="store_true", default=False,
  help="Lock all nodes and auto-promote as needed"
       " to MC status")

AUTO_REPLACE_OPT = cli_option(
  "-a", "--auto", dest="auto",
  action="store_true", default=False,
  help="Automatically replace faulty disks"
       " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option(
  "--ignore-size", dest="ignore_size",
  action="store_true", default=False,
  help="Ignore current recorded size"
       " (useful for forcing activation when"
       " the recorded size is wrong)")

SRC_NODE_OPT = cli_option(
  "--src-node", dest="src_node",
  metavar="<node>",
  completion_suggest=OPT_COMPL_ONE_NODE,
  help="Source node")

SRC_DIR_OPT = cli_option(
  "--src-dir", dest="src_dir",
  metavar="<dir>",
  help="Source directory")

SECONDARY_IP_OPT = cli_option(
  "-s", "--secondary-ip", dest="secondary_ip",
  metavar="ADDRESS", default=None,
  help="Specify the secondary ip for the node")

READD_OPT = cli_option(
  "--readd", dest="readd",
  action="store_true", default=False,
  help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option(
  "--no-ssh-key-check", dest="ssh_key_check",
  action="store_false", default=True,
  help="Disable SSH key fingerprint checking")
    
888

    
889
# Node-flag and cluster-parameter options.  The "bool"-typed options use
# a tri-state default of None so "not specified" can be distinguished
# from an explicit yes/no.  Fixed: the continuation lines of
# CAPAB_MASTER_OPT and CAPAB_VM_OPT were not aligned with the opening
# parenthesis.

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

# NOTE(review): the {} default object is shared between parses; harmless
# for a one-shot CLI run, but it must not be mutated in place
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

    
936
# Cluster-lifecycle, bootstrap and reboot options.

YES_DOIT_OPT = cli_option(
  "--yes-do-it", dest="yes_do_it",
  action="store_true",
  help="Destroy cluster")

NOVOTING_OPT = cli_option(
  "--no-voting", dest="no_voting",
  action="store_true", default=False,
  help="Skip node agreement check (dangerous)")

MAC_PREFIX_OPT = cli_option(
  "-m", "--mac-prefix", dest="mac_prefix",
  metavar="PREFIX", default=None,
  help="Specify the mac prefix for the instance IP"
       " addresses, in the format XX:XX:XX")

MASTER_NETDEV_OPT = cli_option(
  "--master-netdev", dest="master_netdev",
  metavar="NETDEV", default=None,
  help="Specify the node interface (cluster-wide)"
       " on which the master IP address will be added"
       " (cluster init default: %s)" % constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option(
  "--file-storage-dir", dest="file_storage_dir",
  metavar="DIR", default=constants.DEFAULT_FILE_STORAGE_DIR,
  help="Specify the default directory (cluster-"
       "wide) for storing the file-based disks [%s]" %
       constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option(
  "--no-etc-hosts", dest="modify_etc_hosts",
  action="store_false", default=True,
  help="Don't modify /etc/hosts")

NOMODIFY_SSH_SETUP_OPT = cli_option(
  "--no-ssh-init", dest="modify_ssh_setup",
  action="store_false", default=True,
  help="Don't initialize SSH keys")

ERROR_CODES_OPT = cli_option(
  "--error-codes", dest="error_codes",
  action="store_true", default=False,
  help="Enable parseable error messages")

NONPLUS1_OPT = cli_option(
  "--no-nplus1-mem", dest="skip_nplusone_mem",
  action="store_true", default=False,
  help="Skip N+1 memory redundancy tests")

REBOOT_TYPE_OPT = cli_option(
  "-t", "--type", dest="reboot_type",
  metavar="<REBOOT>",
  default=constants.INSTANCE_REBOOT_HARD,
  choices=list(constants.REBOOT_TYPES),
  help="Type of reboot: soft/hard/full")

    
986
# Shutdown/timeout handling and certificate/secret renewal options.
# Help-text fixes in this group: "repetions" -> "repetitions"
# (INTERVAL_OPT) and the duplicated word in "Load new new cluster
# domain secret" (CLUSTER_DOMAIN_SECRET_OPT).

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

# NOTE(review): unlike its siblings this store_true flag defaults to
# None rather than False; callers appear to rely only on truthiness,
# but confirm before normalizing it
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

    
1053
# Cluster-policy, uid-pool, OS-flag and node-group options.  Fixed in
# this group: RESERVED_LVS_OPT's continuation lines were misaligned and
# its help text read "logical volumes names".

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

    
1142

    
1143
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# Common options for instance creation; the "add" and "import" commands
# each extend this list with their own specific options.  The order is
# preserved because it determines the order in --help output.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
1168

    
1169

    
1170
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}

  @return: a tuple of (function, options, arguments), or
      (None, None, None) if the command is unknown or the arguments
      are invalid

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # unknown command: print a sorted command overview instead
    # (was a manual keys()/sort() pair; sorted() is equivalent)
    sortedcmds = sorted(commands)

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max(len(" %s" % cmd) for cmd in commands)
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1247

    
1248

    
1249
def _CheckArguments(cmd, args_def, args):
1250
  """Verifies the arguments using the argument definition.
1251

1252
  Algorithm:
1253

1254
    1. Abort with error if values specified by user but none expected.
1255

1256
    1. For each argument in definition
1257

1258
      1. Keep running count of minimum number of values (min_count)
1259
      1. Keep running count of maximum number of values (max_count)
1260
      1. If it has an unlimited number of values
1261

1262
        1. Abort with error if it's not the last argument in the definition
1263

1264
    1. If last argument has limited number of values
1265

1266
      1. Abort with error if number of values doesn't match or is too large
1267

1268
    1. Abort with error if user didn't pass enough values (min_count)
1269

1270
  """
1271
  if args and not args_def:
1272
    ToStderr("Error: Command %s expects no arguments", cmd)
1273
    return False
1274

    
1275
  min_count = None
1276
  max_count = None
1277
  check_max = None
1278

    
1279
  last_idx = len(args_def) - 1
1280

    
1281
  for idx, arg in enumerate(args_def):
1282
    if min_count is None:
1283
      min_count = arg.min
1284
    elif arg.min is not None:
1285
      min_count += arg.min
1286

    
1287
    if max_count is None:
1288
      max_count = arg.max
1289
    elif arg.max is not None:
1290
      max_count += arg.max
1291

    
1292
    if idx == last_idx:
1293
      check_max = (arg.max is not None)
1294

    
1295
    elif arg.max is None:
1296
      raise errors.ProgrammerError("Only the last argument can have max=None")
1297

    
1298
  if check_max:
1299
    # Command with exact number of arguments
1300
    if (min_count is not None and max_count is not None and
1301
        min_count == max_count and len(args) != min_count):
1302
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1303
      return False
1304

    
1305
    # Command with limited number of arguments
1306
    if max_count is not None and len(args) > max_count:
1307
      ToStderr("Error: Command %s expects only %d argument(s)",
1308
               cmd, max_count)
1309
      return False
1310

    
1311
  # Command with some required arguments
1312
  if min_count is not None and len(args) < min_count:
1313
    ToStderr("Error: Command %s expects at least %d argument(s)",
1314
             cmd, min_count)
1315
    return False
1316

    
1317
  return True
1318

    
1319

    
1320
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "a:b" is split on the first colon into a
  two-element list; anything else (including None) is returned as a
  (value, None) tuple.

  """
  if not (value and ':' in value):
    return (value, None)
  return value.split(':', 1)
1328

    
1329

    
1330
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # no variants: the bare name is the only valid one
    return [os_name]
  return ['%s+%s' % (os_name, variant) for variant in os_variants]
1345

    
1346

    
1347
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  # a leading "+" means "append to the defaults" rather than "replace"
  if not selected.startswith("+"):
    return selected.split(",")

  return default + selected[1:].split(",")
1363

    
1364

    
1365
#: Decorator alias; see L{rpc.RunWithRPC} for the wrapped-call semantics
UsesRPC = rpc.RunWithRPC
1366

    
1367

    
1368
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last choice doubles as the no-tty fallback answer
  answer = choices[-1][1]
  text = "\n".join(textwrap.fill(line, 70, replace_whitespace=False)
                   for line in text.splitlines())
  try:
    tty = file("/dev/tty", "a+")
  except IOError:
    # no controlling terminal: fall back to the default answer
    return answer
  try:
    labels = [entry[0] for entry in choices]
    labels[-1] = "[%s]" % labels[-1]
    labels.append('?')
    answers = dict((entry[0], entry[1]) for entry in choices)
    while True:
      tty.write(text)
      tty.write('\n')
      tty.write("/".join(labels))
      tty.write(": ")
      # read at most two characters (answer plus newline)
      line = tty.readline(2).strip().lower()
      if line in answers:
        answer = answers[line]
        break
      elif line == '?':
        # print the per-choice descriptions and prompt again
        for entry in choices:
          tty.write(" %s - %s\n" % (entry[0], entry[2]))
        tty.write("\n")
        continue
  finally:
    tty.close()
  return answer
1423

    
1424

    
1425
class JobSubmittedException(Exception):
  """Raised when a job has been submitted and the client should exit.

  Carries a single argument: the ID of the submitted job, which the
  handler is expected to print.  This is not an error condition, merely
  a structured way for clients to terminate early.

  """
1434

    
1435

    
1436
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  @return: the job id returned by the master

  """
  if cl is None:
    cl = GetClient()

  return cl.SubmitJob(ops)
1452

    
1453

    
1454
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Polls via C{cbs} until the job reaches a final status, forwarding log
  messages to C{report_cbs} on the way; returns the job result on
  success and raises L{errors.OpExecError} on failure/cancellation.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # Python 2: max(None, serial) yields serial, since None sorts
        # below any integer
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE: this "elif" deliberately pairs with "if log_entries" above:
    # the loop only terminates on an update carrying no new log
    # messages, so all logs are drained before polling stops
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  # the job has finished; fetch its final status and per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # report the first failed opcode, distinguishing a partial failure
  # (some earlier opcodes succeeded) from a complete one
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
1530

    
1531

    
1532
class JobPollCbBase:
  """Base class for L{GenericPollJob} data callbacks.

  Subclasses must implement both methods; the base versions only raise.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Must be overridden by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    Must be overridden by subclasses.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
1558

    
1559

    
1560
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses must implement both methods; the base versions only raise.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Must be overridden by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    Must be overridden by subclasses.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
1585

    
1586

    
1587
class _LuxiJobPollCb(JobPollCbBase):
  """Thin adapter delegating all polling calls to a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for every query

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job (delegated to the luxi client).

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
1608

    
1609

    
1610
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callback forwarding log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with a (timestamp, log_type,
        log_msg) tuple for every log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
1632

    
1633

    
1634
class StdioJobPollReportCb(JobPollReportCbBase):
  """Job poll reporter that prints job progress to stdout/stderr.

  """
  def __init__(self):
    """Initializes the one-shot notification flags.

    """
    JobPollReportCbBase.__init__(self)

    # Each "stuck job" notification is printed at most once per job
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints a timestamped log message to stdout.

    """
    when = time.ctime(utils.MergeTime(timestamp))
    ToStdout("%s %s", when, FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Tells the user (once per state) why a job appears to be stuck.

    """
    if status is None:
      return

    if not self.notified_queued and status == constants.JOB_STATUS_QUEUED:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True
    elif (not self.notified_waitlock and
          status == constants.JOB_STATUS_WAITLOCK):
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
1665

    
1666

    
1667
def FormatLogMessage(log_type, log_msg):
  """Formats a job log entry for display.

  Entries that are not plain messages are converted to their string
  representation first; the result is always passed through
  L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
1675

    
1676

    
1677
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Polls for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created
  @param feedback_fn: callable receiving log messages; mutually exclusive
      with C{reporter}
  @param reporter: job poll reporter instance; mutually exclusive with
      C{feedback_fn}

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None:
    # An explicit reporter and a feedback function are mutually exclusive
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1699

    
1700

    
1701
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  Submits a single-opcode job and waits for its result, returning the
  result of the (only) opcode. It should be extended to better handle
  feedback and interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)
  job_id = SendJob([op], cl=cl)

  results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                    reporter=reporter)
  return results[0]
1720

    
1721

    
1722
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  Depending on the '--submit' option ('opts.submit_only'), either submits
  the opcode and waits for the result (returning it), or only sends the
  job and signals its identifier via an exception. Opcodes are processed
  with L{SetGenericOpcodeOpts} in either case (SubmitOpCode does it
  internally).

  @raise JobSubmittedException: when the job was only sent; carries the
      job ID

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  job = [op]
  SetGenericOpcodeOpts(job, opts)
  raise JobSubmittedException(SendJob(job, cl=cl))
1741

    
1742

    
1743
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  Copies the debug level, dry-run flag and (when given) priority from
  the parsed command line options onto every opcode in the list.

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  # These lookups are invariant across the loop
  have_dry_run = hasattr(options, "dry_run")
  priority = getattr(options, "priority", None)

  for op in opcode_list:
    op.debug_level = options.debug
    if have_dry_run:
      op.dry_run = options.dry_run
    if priority is not None:
      op.priority = _PRIONAME_TO_VALUE[priority]
1762

    
1763

    
1764
def GetClient():
  """Returns a LUXI client connected to the local master daemon.

  When the connection fails, tries to produce a more helpful error:
  distinguishes an uninitialized cluster from running the command on a
  non-master node; otherwise re-raises the original error.

  """
  # TODO: Cache object?
  try:
    return luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Reading the master node from ssconf tells us whether this machine
    # belongs to an initialized cluster at all
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    (master, myself) = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
1785

    
1786

    
1787
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: exception instance to format
  @rtype: tuple; (int, string)
  @return: recommended exit code and formatted message

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # The isinstance checks run from most to least specific Ganeti/LUXI
  # error type; the final else handles anything not explicitly listed
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # Special-case a failure to resolve our own hostname
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # Two-argument form carries (details, error type)
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
               " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: the job was deliberately only submitted; report the
    # ID and recommend a successful exit
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
1870

    
1871

    
1872
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      # Include the sub-command in the logged program name
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    # NOTE(review): the exit code computed by FormatError is discarded
    # here and 1 is returned unconditionally -- confirm this is intended
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  # Apply caller-supplied option overrides after parsing
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    # Known error types: log the traceback and show the formatted
    # message; the exit code comes from FormatError
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result
1930

    
1931

    
1932
def ParseNicOption(optvalue):
1933
  """Parses the value of the --net option(s).
1934

1935
  """
1936
  try:
1937
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1938
  except (TypeError, ValueError), err:
1939
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1940

    
1941
  nics = [{}] * nic_max
1942
  for nidx, ndict in optvalue:
1943
    nidx = int(nidx)
1944

    
1945
    if not isinstance(ndict, dict):
1946
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1947
                                 " got %s" % (nidx, ndict))
1948

    
1949
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1950

    
1951
    nics[nidx] = ndict
1952

    
1953
  return nics
1954

    
1955

    
1956
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  # Determine NIC configuration: explicit --net options, --no-nics, or
  # mode-dependent defaults
  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      # Translate the single-disk shortcut into the generic disk list
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      # NOTE(review): "[{}] * disk_max" makes all unspecified slots share
      # ONE dict object; safe only as long as nobody mutates the empty
      # entries in place -- consider "[{} for _ in range(disk_max)]"
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        # "size" and "adopt" are mutually exclusive per disk
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        # Adopted disks have no explicit size
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  # Validate/coerce backend and hypervisor parameter types in place
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # Mode-specific opcode parameters
  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0
2083

    
2084

    
2085
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; the master is always handled
    # locally and (re)started last
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    @raise errors.OpExecError: if the command fails on any node

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    Stops the watcher and all cluster daemons (master daemons first),
    runs C{fn}, then restarts the daemons (master node last) and the
    watcher, even if C{fn} raised.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          # Report the error before re-raising so the user sees it even
          # amid the restart messages below
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
2178

    
2179

    
2180
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  Gathers the information needed to restart the cluster, then delegates
  to L{_RunWhileClusterStoppedHelper} for stopping the daemons, running
  C{fn} and bringing everything back up.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # Talking to the master daemon also proves it is currently running
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])
  online_nodes = GetOnlineNodes([], cl=cl)

  # Drop the client reference; the master daemon is about to be stopped
  del cl

  assert master_node in online_nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, online_nodes)
  return helper.Call(fn, *args)
2206

    
2207

    
2208
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  Note: the rows in C{data} are modified in place (values are converted
  to strings, unit fields formatted).

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  # Build one printf-style conversion per column; with a separator all
  # columns are plain "%s", otherwise widths ("%*s"/"%-*s") are filled
  # in per row from the computed maximum lengths
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    # Escape "%" in the separator so it survives the "%" formatting below
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: stringify all values (in place) and track column widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # Don't pad the last column unless it's right-aligned (avoids
    # trailing whitespace)
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      # A None row is rendered as a dash in every column
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2314

    
2315

    
2316
def _FormatBool(value):
2317
  """Formats a boolean value as a string.
2318

2319
  """
2320
  if value:
2321
    return "Y"
2322
  return "N"
2323

    
2324

    
2325
#: Default formatting for query results, indexed by field type; each
#: value is a tuple of (formatting callback, align right).
#: L{constants.QFT_UNIT} is intentionally absent: its formatter depends
#: on the runtime unit and is built in L{_GetColumnFormatter}.
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
2334

    
2335

    
2336
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  # Per-field overrides take precedence over everything else
  custom = override.get(fdef.name, None)
  if custom is not None:
    return custom

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # The unit is only known at runtime, so this entry can't live in the
    # static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  try:
    return _DEFAULT_FORMAT_QUERY[fdef.kind]
  except KeyError:
    raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2365

    
2366

    
2367
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status

    """
    self._fn = fn
    self._status_fn = status_fn

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Every processed field is counted towards the result statistics
    self._status_fn(status)

    if status == constants.QRFS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    # Abnormal statuses carry no value; show a placeholder instead
    placeholders = {
      constants.QRFS_UNKNOWN: "(unknown)",
      constants.QRFS_NODATA: "(nodata)",
      constants.QRFS_UNAVAIL: "(unavail)",
      constants.QRFS_OFFLINE: "(offline)",
      }

    try:
      return placeholders[status]
    except KeyError:
      raise NotImplementedError("Unknown status %s" % status)
2411

    
2412

    
2413
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @rtype: tuple; (one of L{QR_NORMAL}, L{QR_UNKNOWN} or L{QR_INCOMPLETE},
    list of strings)
  @return: overall result status and the formatted table lines

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, filled in by the formatters via _RecordStatus
  stats = dict.fromkeys(constants.QRFS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus),
                               align_right))

  # Formatting the table also populates "stats" as a side effect
  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.QRFS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.QRFS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.QRFS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
2472

    
2473

    
2474
def _GetUnknownFields(fdefs):
  """Filters C{fdefs} down to the fields of unknown type.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list of L{objects.QueryFieldDefinition}

  """
  return [item for item in fdefs if item.kind == constants.QFT_UNKNOWN]
2482

    
2483

    
2484
def _WarnUnknownFields(fdefs):
  """Warns on stderr when a query contained unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: True if at least one unknown field was queried, False otherwise

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
2497

    
2498

    
2499
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @param cl: luxi client to use; created via L{GetClient} if not given
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @rtype: int
  @return: L{constants.EXIT_UNKNOWN_FIELD} if an unknown field was queried,
    L{constants.EXIT_SUCCESS} otherwise

  """
  if cl is None:
    cl = GetClient()

  # An empty name list means "all items"
  response = cl.Query(resource, fields,
                      qlang.MakeSimpleFilter("name", names if names else None))

  found_unknown = _WarnUnknownFields(response.fields)

  (status, output) = FormatQueryResult(response, unit=unit,
                                       separator=separator, header=header,
                                       format_override=format_override)

  for line in output:
    ToStdout(line)

  # The per-value statistics and the field definitions must agree on
  # whether unknown fields were involved
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
2546

    
2547

    
2548
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @param cl: luxi client to use; created via L{GetClient} if not given
  @rtype: int
  @return: L{constants.EXIT_UNKNOWN_FIELD} if an unknown field was queried,
    L{constants.EXIT_SUCCESS} otherwise

  """
  if cl is None:
    cl = GetClient()

  # An empty field list means "all fields"
  response = cl.QueryFields(resource, fields if fields else None)

  found_unknown = _WarnUnknownFields(response.fields)

  cols = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    # TODO: Add field description to master daemon
    ]

  rows = [[fdef.name, fdef.title] for fdef in response.fields]

  for line in FormatTable(rows, cols, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2585

    
2586

    
2587
class TableColumn:
  """Description of a single column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function applied to every value in the column
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.align_right = align_right
    self.format = fn
    self.title = title
2605

    
2606

    
2607
def _GetColFormatString(width, align_right):
2608
  """Returns the format string for a field.
2609

2610
  """
2611
  if align_right:
2612
    sign = ""
2613
  else:
2614
    sign = "-"
2615

    
2616
  return "%%%s%ss" % (sign, width)
2617

    
2618

    
2619
def FormatTable(rows, columns, header, separator):
  """Formats data as a text table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns; if None, columns are
    padded with spaces instead
  @rtype: list of strings
  @return: the formatted table lines

  """
  if header:
    data = [[col.title for col in columns]]
    widths = [len(col.title) for col in columns]
  else:
    data = []
    widths = [0] * len(columns)

  for row in rows:
    assert len(row) == len(columns)

    rendered = [col.format(value) for (value, col) in zip(row, columns)]

    if separator is None:
      # Track the widest value seen per column so far
      widths = [max(known, len(value))
                for (known, value) in zip(widths, rendered)]

    data.append(rendered)

  if separator is not None:
    # With an explicit separator no padding is needed
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    widths[-1] = 0

  fmt = " ".join(_GetColFormatString(width, col.align_right)
                 for (col, width) in zip(columns, widths))

  return [fmt % tuple(row) for row in data]
2666

    
2667

    
2668
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" if the input is
    not a two-element tuple/list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usec) = ts
  formatted = time.strftime("%F %T", time.localtime(sec))
  return formatted + (".%06d" % usec)
2682

    
2683

    
2684
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes are recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds represented by C{value}
  @raise errors.OpPrereqError: if the specification is empty or not numeric

  """
  text = str(value)
  if not text:
    raise errors.OpPrereqError("Empty time specification passed")

  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if text[-1] in suffix_map:
    multiplier = suffix_map[text[-1]]
    digits = text[:-1]
    if not digits:
      # Nothing left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
  else:
    multiplier = 1
    digits = text

  try:
    return int(digits) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % digits)
2724

    
2725

    
2726
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  # Query row layout is [name, offline, sip]; pick the column to return
  name_idx = 2 if secondary_ips else 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]

    def _Accept(name):
      return name != master_node
  else:
    def _Accept(_):
      return True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)

  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))

  return [row[name_idx] for row in result if not row[1] and _Accept(row[0])]
2770

    
2771

    
2772
def _ToStream(stream, txt, *args):
2773
  """Write a message to a stream, bypassing the logging system
2774

2775
  @type stream: file object
2776
  @param stream: the file to which we should write
2777
  @type txt: str
2778
  @param txt: the message
2779

2780
  """
2781
  if args:
2782
    args = tuple(args)
2783
    stream.write(txt % args)
2784
  else:
2785
    stream.write(txt)
2786
  stream.write('\n')
2787
  stream.flush()
2788

    
2789

    
2790
def ToStdout(txt, *args):
  """Print a message on standard output, bypassing the logging system.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
2800

    
2801

    
2802
def ToStderr(txt, *args):
  """Print a message on standard error, bypassing the logging system.

  Thin convenience wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
2812

    
2813

    
2814
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  Jobs are tracked internally as 4-tuples of (submission index,
  submission status, job id or error message, job name); the index
  preserves submission order so results can be returned in order.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: luxi client to use; a new one is created via L{GetClient}
        if not given
    @type verbose: bool
    @param verbose: whether to print the IDs of successfully submitted jobs
    @param opts: parsed command-line options, passed to
        L{SetGenericOpcodeOpts} for every queued job
    @param feedback_fn: feedback function passed to L{PollJob}

    """
    # (name, ops) tuples queued but not yet submitted
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # (idx, status, job id/error, name) tuples of submitted jobs
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    # Apply the generic opcode options (from self.opts) before queueing
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: bool
    @param each: if True, submit one job per SubmitJob call; otherwise
        send the whole queue in a single SubmitManyJobs request

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    # Pair each submission result with its queued (name, ops) entry and
    # record the 4-tuple described in the class docstring
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    The chosen entry is removed from L{self.jobs}; if every job is still
    waiting, the first entry is returned as a fallback.

    @return: a (idx, status, job id, name) tuple

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # Element 2 of each tuple is the job ID
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    # Auto-submit if the caller did not call SubmitPending() explicitly
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      # Element 1 is the submission status, element 2 the job ID
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      # For failed submissions, the "job id" slot holds the error message
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not
    @return: the result of L{GetResults} when waiting, otherwise a list
        of (status, job id/error) tuples for the submitted jobs

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
2945

    
2946

    
2947
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  Every key of C{actual} is written on its own indented line; values not
  explicitly set in C{param_dict} are shown as "default (<value>)".

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  prefix = level * "  "
  for name in sorted(actual):
    if name in param_dict:
      shown = param_dict[name]
    else:
      shown = "default (%s)" % actual[name]
    buf.write("%s- %s: %s\n" % (prefix, name, shown))