lib/cli.py @ 225e2544

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
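
# Illustrative example (not part of the original module): _SplitKeyVal turns a
# key=val listing into a dict, applying the "no_"/"-" prefix rules above:
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus")
#   => {"memory": "512", "auto_balance": False, "vcpus": None}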


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
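
# Illustrative example (not part of the original module): for identkeyval
# options such as -H/--net/--disk, the identifier is split off first:
#   check_ident_key_val(opt, "-H", "kvm:kernel_path=/vmlinuz,acpi")
#   => ("kvm", {"kernel_path": "/vmlinuz", "acpi": True})
# while a bare "no_kvm" (no parameters allowed) yields ("kvm", False).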


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
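
# Illustrative sketch (not part of the original module): options built via
# cli_option plug straight into a standard optparse parser and use the custom
# type checkers defined above, e.g. (BACKEND_OPT is defined further below):
#   parser = OptionParser(option_list=[BACKEND_OPT])
#   options, args = parser.parse_args(["-B", "memory=512,auto_balance"])
#   # options.beparams == {"memory": "512", "auto_balance": True}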


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify that it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT =  cli_option("-H", "--hypervisor-parameters", type="keyval",
                         default={}, dest="hvparams",
                         help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the MAC prefix for the instance's"
                            " network interfaces, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volumes names, that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
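
# Illustrative example (not part of the original module): each value in the
# "commands" dictionary is the 5-tuple unpacked above, i.e.
# (function, argument definition, options, usage string, description), e.g.:
#   commands = {
#     "tags": (ListTags, ARGS_NONE, [], "", "List the cluster tags"),
#     }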


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
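
# Illustrative example (not part of the original module): with an argument
# definition of ARGS_ONE_NODE (= [ArgNode(min=1, max=1)]) exactly one value
# must be given, while [ArgNode(min=1, max=None)] accepts one or more values.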


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
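
# Illustrative example (not part of the original module):
#   SplitNodeOption("node1.example.com:node2.example.com")
#   => ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") => ("node1.example.com", None)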


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
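
# Illustrative example (not part of the original module):
#   CalculateOSNames("debootstrap", ["squeeze", "wheezy"])
#   => ['debootstrap+squeeze', 'debootstrap+wheezy']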


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
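
# Illustrative example (not part of the original module):
#   ParseFields(None, ["name", "size"])         => ["name", "size"]
#   ParseFields("+mtime", ["name", "size"])     => ["name", "size", "mtime"]
#   ParseFields("name,mtime", ["name", "size"]) => ["name", "mtime"]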


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list
1391

1392
  """
1393
  if choices is None:
1394
    choices = [('y', True, 'Perform the operation'),
1395
               ('n', False, 'Do not perform the operation')]
1396
  if not choices or not isinstance(choices, list):
1397
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1398
  for entry in choices:
1399
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1400
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1401

    
1402
  answer = choices[-1][1]
1403
  new_text = []
1404
  for line in text.splitlines():
1405
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1406
  text = "\n".join(new_text)
1407
  try:
1408
    f = file("/dev/tty", "a+")
1409
  except IOError:
1410
    return answer
1411
  try:
1412
    chars = [entry[0] for entry in choices]
1413
    chars[-1] = "[%s]" % chars[-1]
1414
    chars.append('?')
1415
    maps = dict([(entry[0], entry[1]) for entry in choices])
1416
    while True:
1417
      f.write(text)
1418
      f.write('\n')
1419
      f.write("/".join(chars))
1420
      f.write(": ")
1421
      line = f.readline(2).strip().lower()
1422
      if line in maps:
1423
        answer = maps[line]
1424
        break
1425
      elif line == '?':
1426
        for entry in choices:
1427
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1428
        f.write("\n")
1429
        continue
1430
  finally:
1431
    f.close()
1432
  return answer
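
# Illustrative usage (prompt text and choices are placeholders):
#   choices = [("y", True, "Perform the operation"),
#              ("n", False, "Do not perform the operation"),
#              ("a", "abort", "Abort the whole run")]
#   answer = AskUser("Continue with the next step?", choices)
# When no tty is available, the last entry's return value ("abort" here)
# is returned without prompting.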


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
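
# Illustrative flow (a sketch, not taken from this module): client code
# typically builds an opcode, submits it with SendJob and waits for the
# result with PollJob; "build_opcode()" is a hypothetical helper standing
# in for any opcodes.OpCode constructor.
#   cl = GetClient()
#   job_id = SendJob([build_opcode()], cl=cl)
#   result = PollJob(job_id, cl=cl)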


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
               " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')


def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
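
# Illustrative input/output (a sketch; the parameter values are placeholders
# and are only accepted if they pass constants.INIC_PARAMS_TYPES validation):
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mac": "aa:00:00:11:22:33"})])
#     -> [{"link": "br0"}, {}, {"mac": "aa:00:00:11:22:33"}]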


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Generates a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    if verbose:
      self._desc_index = 0
    else:
      self._desc_index = 1

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    if status in constants.RSS_DESCRIPTION:
      return constants.RSS_DESCRIPTION[status][self._desc_index]

    raise NotImplementedError("Unknown status %s" % status)


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if cl is None:
    cl = GetClient()

  if not names:
    names = None

  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    # TODO: Add field description to master daemon
    ]

  rows = [[fdef.name, fdef.title] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
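
# Illustrative usage (a sketch; column names and values are placeholders):
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True),
#     ]
#   for line in FormatTable([["disk0", 1024], ["disk1", 512]], columns,
#                           True, None):
#     ToStdout(line)
# With separator=None the column widths are computed automatically; passing
# e.g. separator="|" instead joins the formatted values with that string.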


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
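
# Example (illustrative; the exact output depends on the local timezone):
#   FormatTimestamp((1325376000, 12345)) -> "2012-01-01 00:00:00.012345" (UTC)
#   FormatTimestamp("not a timestamp")   -> "?"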


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
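
# Example (illustrative):
#   ParseTimespec("30") -> 30       (plain seconds)
#   ParseTimespec("2h") -> 7200
#   ParseTimespec("1w") -> 604800
#   ParseTimespec("d")  raises OpPrereqError (suffix without a value)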


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if secondary_ips:
    name_idx = 2
  else:
    name_idx = 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    filter_fn = lambda x: x != master_node
  else:
    filter_fn = lambda _: True

  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write('\n')
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
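
# Example (illustrative): both helpers accept printf-style arguments, e.g.
#   ToStdout("Found %d node(s)", 3)
#   ToStderr("Warning: node %s is offline", "node1.example.com")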


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
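
# Illustrative usage (a sketch; "MakeShutdownOp" is a hypothetical helper
# returning one opcode per instance):
#   je = JobExecutor(cl=cl, opts=opts)
#   for instance in instances:
#     je.QueueJob(instance, MakeShutdownOp(instance))
#   for success, result in je.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)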


def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the object's own (non-default) parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
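
# Example (illustrative; parameter names and values are placeholders):
#   buf = StringIO()
#   FormatParameterDict(buf, {"memory": 512}, {"memory": 512, "vcpus": 1})
#   # buf.getvalue() now contains:
#   #   - memory: 512
#   #   - vcpus: default (1)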