#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
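
# Illustrative sketch (not part of the original module): given an options
# object whose tags_source attribute names a file with one tag per line
# (e.g. the hypothetical "/tmp/tags.txt"), _ExtendTags(opts, args) appends
# the stripped lines of that file to args; a tags_source of "-" reads the
# tags from stdin instead, and None leaves args untouched.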


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpAddTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpDelTags(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
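
# Illustrative sketch (not part of the original module): under the rules
# above, a value like "vcpus=2,no_acpi,-kernel_path" parses as
#   _SplitKeyVal("-H", "vcpus=2,no_acpi,-kernel_path")
#   => {'vcpus': '2', 'acpi': False, 'kernel_path': None}
# i.e. plain key=val pairs keep their string value, 'no_'-prefixed keys
# become False and '-'-prefixed keys become None.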


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
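
# Illustrative sketch (not part of the original module): this parser backs
# "identkeyval" options such as --net and --disk, e.g.
#   check_ident_key_val(None, "--net", "0:mode=bridged,link=br0")
#   => ('0', {'mode': 'bridged', 'link': 'br0'})
# while a bare "no_0" or "-0" yields ('0', False) or ('0', None), the forms
# used when removing parameter groups.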


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggest is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of hmgt)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")


MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the offline flag on the node")

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help="Set the drained flag on the node")

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help="Enables LVM and specifies the volume group"
                         " name (cluster-wide) for disk allocation [xenvg]",
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance"
                            " MAC addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names, that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# Common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
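
# Illustrative sketch (not part of the original module): for a command
# declared with args_def=ARGS_ONE_INSTANCE (ArgInstance(min=1, max=1)) the
# checks above demand exactly one positional argument, while
# ARGS_MANY_NODES (ArgNode() with min=0, max=None) accepts any number of
# node names, including none.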


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ':' in value:
    return value.split(':', 1)
  else:
    return (value, None)
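
# Illustrative sketch (not part of the original module):
#   SplitNodeOption("node1.example.com:node2.example.com")
#   => ['node1.example.com', 'node2.example.com']
#   SplitNodeOption("node1.example.com") => ('node1.example.com', None)
# i.e. an optional secondary node may follow the primary after a colon.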


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ['%s+%s' % (os_name, v) for v in os_variants]
  else:
    return [os_name]
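
# Illustrative sketch (not part of the original module):
#   CalculateOSNames("debootstrap", ["squeeze", "wheezy"])
#   => ['debootstrap+squeeze', 'debootstrap+wheezy']
#   CalculateOSNames("debootstrap", None) => ['debootstrap']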


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
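
# Illustrative sketch (not part of the original module):
#   ParseFields(None, ["name", "status"]) => ['name', 'status']
#   ParseFields("+oper_ram", ["name", "status"])
#   => ['name', 'status', 'oper_ram']
#   ParseFields("name,pnode", ["name", "status"]) => ['name', 'pnode']
# i.e. a leading "+" extends the default field list instead of replacing it.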
1361

    
1362

    
1363
UsesRPC = rpc.RunWithRPC
1364

    
1365

    
1366
def AskUser(text, choices=None):
1367
  """Ask the user a question.
1368

1369
  @param text: the question to ask
1370

1371
  @param choices: list with elements tuples (input_char, return_value,
1372
      description); if not given, it will default to: [('y', True,
1373
      'Perform the operation'), ('n', False, 'Do no do the operation')];
1374
      note that the '?' char is reserved for help
1375

1376
  @return: one of the return values from the choices list; if input is
1377
      not possible (i.e. not running with a tty, we return the last
1378
      entry from the list
1379

1380
  """
1381
  if choices is None:
1382
    choices = [('y', True, 'Perform the operation'),
1383
               ('n', False, 'Do not perform the operation')]
1384
  if not choices or not isinstance(choices, list):
1385
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1386
  for entry in choices:
1387
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1388
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1389

    
1390
  answer = choices[-1][1]
1391
  new_text = []
1392
  for line in text.splitlines():
1393
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1394
  text = "\n".join(new_text)
1395
  try:
1396
    f = file("/dev/tty", "a+")
1397
  except IOError:
1398
    return answer
1399
  try:
1400
    chars = [entry[0] for entry in choices]
1401
    chars[-1] = "[%s]" % chars[-1]
1402
    chars.append('?')
1403
    maps = dict([(entry[0], entry[1]) for entry in choices])
1404
    while True:
1405
      f.write(text)
1406
      f.write('\n')
1407
      f.write("/".join(chars))
1408
      f.write(": ")
1409
      line = f.readline(2).strip().lower()
1410
      if line in maps:
1411
        answer = maps[line]
1412
        break
1413
      elif line == '?':
1414
        for entry in choices:
1415
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1416
        f.write("\n")
1417
        continue
1418
  finally:
1419
    f.close()
1420
  return answer
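
# Illustrative sketch, not part of the original module: asking a three-way
# question with a custom choices list; the wording and return values are
# made up for the example.
def _ExampleAskUser():
  """Shows a custom choices list for AskUser.

  """
  # the last entry doubles as the default answer when no tty is available
  choices = [("y", True, "Remove the instance"),
             ("r", "rename", "Rename the instance instead of removing it"),
             ("n", False, "Do nothing")]
  return AskUser("Instance will be removed. Continue?", choices)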
1421

    
1422

    
1423
class JobSubmittedException(Exception):
1424
  """Job was submitted, client should exit.
1425

1426
  This exception has one argument, the ID of the job that was
1427
  submitted. The handler should print this ID.
1428

1429
  This is not an error, just a structured way to exit from clients.
1430

1431
  """
1432

    
1433

    
1434
def SendJob(ops, cl=None):
1435
  """Function to submit an opcode without waiting for the results.
1436

1437
  @type ops: list
1438
  @param ops: list of opcodes
1439
  @type cl: luxi.Client
1440
  @param cl: the luxi client to use for communicating with the master;
1441
             if None, a new client will be created
1442

1443
  """
1444
  if cl is None:
1445
    cl = GetClient()
1446

    
1447
  job_id = cl.SubmitJob(ops)
1448

    
1449
  return job_id
1450

    
1451

    
1452
def GenericPollJob(job_id, cbs, report_cbs):
1453
  """Generic job-polling function.
1454

1455
  @type job_id: number
1456
  @param job_id: Job ID
1457
  @type cbs: Instance of L{JobPollCbBase}
1458
  @param cbs: Data callbacks
1459
  @type report_cbs: Instance of L{JobPollReportCbBase}
1460
  @param report_cbs: Reporting callbacks
1461

1462
  """
1463
  prev_job_info = None
1464
  prev_logmsg_serial = None
1465

    
1466
  status = None
1467

    
1468
  while True:
1469
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1470
                                      prev_logmsg_serial)
1471
    if not result:
1472
      # job not found, go away!
1473
      raise errors.JobLost("Job with id %s lost" % job_id)
1474

    
1475
    if result == constants.JOB_NOTCHANGED:
1476
      report_cbs.ReportNotChanged(job_id, status)
1477

    
1478
      # Wait again
1479
      continue
1480

    
1481
    # Split result, a tuple of (field values, log entries)
1482
    (job_info, log_entries) = result
1483
    (status, ) = job_info
1484

    
1485
    if log_entries:
1486
      for log_entry in log_entries:
1487
        (serial, timestamp, log_type, message) = log_entry
1488
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1489
                                    log_type, message)
1490
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1491

    
1492
    # TODO: Handle canceled and archived jobs
1493
    elif status in (constants.JOB_STATUS_SUCCESS,
1494
                    constants.JOB_STATUS_ERROR,
1495
                    constants.JOB_STATUS_CANCELING,
1496
                    constants.JOB_STATUS_CANCELED):
1497
      break
1498

    
1499
    prev_job_info = job_info
1500

    
1501
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1502
  if not jobs:
1503
    raise errors.JobLost("Job with id %s lost" % job_id)
1504

    
1505
  status, opstatus, result = jobs[0]
1506

    
1507
  if status == constants.JOB_STATUS_SUCCESS:
1508
    return result
1509

    
1510
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1511
    raise errors.OpExecError("Job was canceled")
1512

    
1513
  has_ok = False
1514
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1515
    if status == constants.OP_STATUS_SUCCESS:
1516
      has_ok = True
1517
    elif status == constants.OP_STATUS_ERROR:
1518
      errors.MaybeRaise(msg)
1519

    
1520
      if has_ok:
1521
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1522
                                 (idx, msg))
1523

    
1524
      raise errors.OpExecError(str(msg))
1525

    
1526
  # default failure mode
1527
  raise errors.OpExecError(result)
1528

    
1529

    
1530
class JobPollCbBase:
1531
  """Base class for L{GenericPollJob} callbacks.
1532

1533
  """
1534
  def __init__(self):
1535
    """Initializes this class.
1536

1537
    """
1538

    
1539
  def WaitForJobChangeOnce(self, job_id, fields,
1540
                           prev_job_info, prev_log_serial):
1541
    """Waits for changes on a job.
1542

1543
    """
1544
    raise NotImplementedError()
1545

    
1546
  def QueryJobs(self, job_ids, fields):
1547
    """Returns the selected fields for the selected job IDs.
1548

1549
    @type job_ids: list of numbers
1550
    @param job_ids: Job IDs
1551
    @type fields: list of strings
1552
    @param fields: Fields
1553

1554
    """
1555
    raise NotImplementedError()
1556

    
1557

    
1558
class JobPollReportCbBase:
1559
  """Base class for L{GenericPollJob} reporting callbacks.
1560

1561
  """
1562
  def __init__(self):
1563
    """Initializes this class.
1564

1565
    """
1566

    
1567
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1568
    """Handles a log message.
1569

1570
    """
1571
    raise NotImplementedError()
1572

    
1573
  def ReportNotChanged(self, job_id, status):
1574
    """Called for if a job hasn't changed in a while.
1575

1576
    @type job_id: number
1577
    @param job_id: Job ID
1578
    @type status: string or None
1579
    @param status: Job status if available
1580

1581
    """
1582
    raise NotImplementedError()
1583

    
1584

    
1585
class _LuxiJobPollCb(JobPollCbBase):
1586
  def __init__(self, cl):
1587
    """Initializes this class.
1588

1589
    """
1590
    JobPollCbBase.__init__(self)
1591
    self.cl = cl
1592

    
1593
  def WaitForJobChangeOnce(self, job_id, fields,
1594
                           prev_job_info, prev_log_serial):
1595
    """Waits for changes on a job.
1596

1597
    """
1598
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1599
                                        prev_job_info, prev_log_serial)
1600

    
1601
  def QueryJobs(self, job_ids, fields):
1602
    """Returns the selected fields for the selected job IDs.
1603

1604
    """
1605
    return self.cl.QueryJobs(job_ids, fields)
1606

    
1607

    
1608
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1609
  def __init__(self, feedback_fn):
1610
    """Initializes this class.
1611

1612
    """
1613
    JobPollReportCbBase.__init__(self)
1614

    
1615
    self.feedback_fn = feedback_fn
1616

    
1617
    assert callable(feedback_fn)
1618

    
1619
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1620
    """Handles a log message.
1621

1622
    """
1623
    self.feedback_fn((timestamp, log_type, log_msg))
1624

    
1625
  def ReportNotChanged(self, job_id, status):
1626
    """Called if a job hasn't changed in a while.
1627

1628
    """
1629
    # Ignore
1630

    
1631

    
1632
class StdioJobPollReportCb(JobPollReportCbBase):
1633
  def __init__(self):
1634
    """Initializes this class.
1635

1636
    """
1637
    JobPollReportCbBase.__init__(self)
1638

    
1639
    self.notified_queued = False
1640
    self.notified_waitlock = False
1641

    
1642
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1643
    """Handles a log message.
1644

1645
    """
1646
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1647
             FormatLogMessage(log_type, log_msg))
1648

    
1649
  def ReportNotChanged(self, job_id, status):
1650
    """Called if a job hasn't changed in a while.
1651

1652
    """
1653
    if status is None:
1654
      return
1655

    
1656
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1657
      ToStderr("Job %s is waiting in queue", job_id)
1658
      self.notified_queued = True
1659

    
1660
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1661
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1662
      self.notified_waitlock = True
1663

    
1664

    
1665
def FormatLogMessage(log_type, log_msg):
1666
  """Formats a job message according to its type.
1667

1668
  """
1669
  if log_type != constants.ELOG_MESSAGE:
1670
    log_msg = str(log_msg)
1671

    
1672
  return utils.SafeEncode(log_msg)
1673

    
1674

    
1675
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1676
  """Function to poll for the result of a job.
1677

1678
  @type job_id: job identifier
1679
  @param job_id: the job to poll for results
1680
  @type cl: luxi.Client
1681
  @param cl: the luxi client to use for communicating with the master;
1682
             if None, a new client will be created
1683

1684
  """
1685
  if cl is None:
1686
    cl = GetClient()
1687

    
1688
  if reporter is None:
1689
    if feedback_fn:
1690
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1691
    else:
1692
      reporter = StdioJobPollReportCb()
1693
  elif feedback_fn:
1694
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1695

    
1696
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
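
# Illustrative sketch, not part of the original module: the SendJob/PollJob
# pair used for submitting a job and then waiting for it; "op" stands for
# any opcode instance built by the caller.
def _ExampleSendAndPollJob(op):
  """Submits a single-opcode job and waits for its result.

  """
  cl = GetClient()
  job_id = SendJob([op], cl=cl)
  # blocks until the job finishes, printing its log messages to stdout
  results = PollJob(job_id, cl=cl)
  return results[0]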
1697

    
1698

    
1699
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1700
  """Legacy function to submit an opcode.
1701

1702
  This is just a simple wrapper over the construction of the processor
1703
  instance. It should be extended to better handle feedback and
1704
  interaction functions.
1705

1706
  """
1707
  if cl is None:
1708
    cl = GetClient()
1709

    
1710
  SetGenericOpcodeOpts([op], opts)
1711

    
1712
  job_id = SendJob([op], cl=cl)
1713

    
1714
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1715
                       reporter=reporter)
1716

    
1717
  return op_results[0]
1718

    
1719

    
1720
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1721
  """Wrapper around SubmitOpCode or SendJob.
1722

1723
  This function will decide, based on the 'opts' parameter, whether to
1724
  submit and wait for the result of the opcode (and return it), or
1725
  whether to just send the job and print its identifier. It is used in
1726
  order to simplify the implementation of the '--submit' option.
1727

1728
  It will also process the opcodes if we're sending them via SendJob
1729
  (otherwise SubmitOpCode does it).
1730

1731
  """
1732
  if opts and opts.submit_only:
1733
    job = [op]
1734
    SetGenericOpcodeOpts(job, opts)
1735
    job_id = SendJob(job, cl=cl)
1736
    raise JobSubmittedException(job_id)
1737
  else:
1738
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
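
# Illustrative sketch, not part of the original module: the typical shape of
# a gnt-* command function built on top of SubmitOrSend; the opcode is
# whatever the real command constructs from its options.
def _ExampleCommandFn(op, opts):
  """Submits an opcode, honouring the "--submit" option.

  """
  # with --submit this raises JobSubmittedException, which GenericMain
  # turns into printing the job ID and exiting with status 0
  SubmitOrSend(op, opts)
  return 0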
1739

    
1740

    
1741
def SetGenericOpcodeOpts(opcode_list, options):
1742
  """Processor for generic options.
1743

1744
  This function updates the given opcodes based on generic command
1745
  line options (like debug, dry-run, etc.).
1746

1747
  @param opcode_list: list of opcodes
1748
  @param options: command line options or None
1749
  @return: None (in-place modification)
1750

1751
  """
1752
  if not options:
1753
    return
1754
  for op in opcode_list:
1755
    op.debug_level = options.debug
1756
    if hasattr(options, "dry_run"):
1757
      op.dry_run = options.dry_run
1758
    if getattr(options, "priority", None) is not None:
1759
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1760

    
1761

    
1762
def GetClient():
1763
  # TODO: Cache object?
1764
  try:
1765
    client = luxi.Client()
1766
  except luxi.NoMasterError:
1767
    ss = ssconf.SimpleStore()
1768

    
1769
    # Try to read ssconf file
1770
    try:
1771
      ss.GetMasterNode()
1772
    except errors.ConfigurationError:
1773
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1774
                                 " not part of a cluster")
1775

    
1776
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1777
    if master != myself:
1778
      raise errors.OpPrereqError("This is not the master node, please connect"
1779
                                 " to node '%s' and rerun the command" %
1780
                                 master)
1781
    raise
1782
  return client
1783

    
1784

    
1785
def FormatError(err):
1786
  """Return a formatted error message for a given error.
1787

1788
  This function takes an exception instance and returns a tuple
1789
  consisting of two values: first, the recommended exit code, and
1790
  second, a string describing the error message (not
1791
  newline-terminated).
1792

1793
  """
1794
  retcode = 1
1795
  obuf = StringIO()
1796
  msg = str(err)
1797
  if isinstance(err, errors.ConfigurationError):
1798
    txt = "Corrupt configuration file: %s" % msg
1799
    logging.error(txt)
1800
    obuf.write(txt + "\n")
1801
    obuf.write("Aborting.")
1802
    retcode = 2
1803
  elif isinstance(err, errors.HooksAbort):
1804
    obuf.write("Failure: hooks execution failed:\n")
1805
    for node, script, out in err.args[0]:
1806
      if out:
1807
        obuf.write("  node: %s, script: %s, output: %s\n" %
1808
                   (node, script, out))
1809
      else:
1810
        obuf.write("  node: %s, script: %s (no output)\n" %
1811
                   (node, script))
1812
  elif isinstance(err, errors.HooksFailure):
1813
    obuf.write("Failure: hooks general failure: %s" % msg)
1814
  elif isinstance(err, errors.ResolverError):
1815
    this_host = netutils.Hostname.GetSysName()
1816
    if err.args[0] == this_host:
1817
      msg = "Failure: can't resolve my own hostname ('%s')"
1818
    else:
1819
      msg = "Failure: can't resolve hostname '%s'"
1820
    obuf.write(msg % err.args[0])
1821
  elif isinstance(err, errors.OpPrereqError):
1822
    if len(err.args) == 2:
1823
      obuf.write("Failure: prerequisites not met for this"
1824
               " operation:\nerror type: %s, error details:\n%s" %
1825
                 (err.args[1], err.args[0]))
1826
    else:
1827
      obuf.write("Failure: prerequisites not met for this"
1828
                 " operation:\n%s" % msg)
1829
  elif isinstance(err, errors.OpExecError):
1830
    obuf.write("Failure: command execution error:\n%s" % msg)
1831
  elif isinstance(err, errors.TagError):
1832
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1833
  elif isinstance(err, errors.JobQueueDrainError):
1834
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1835
               " accept new requests\n")
1836
  elif isinstance(err, errors.JobQueueFull):
1837
    obuf.write("Failure: the job queue is full and doesn't accept new"
1838
               " job submissions until old jobs are archived\n")
1839
  elif isinstance(err, errors.TypeEnforcementError):
1840
    obuf.write("Parameter Error: %s" % msg)
1841
  elif isinstance(err, errors.ParameterError):
1842
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1843
  elif isinstance(err, luxi.NoMasterError):
1844
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1845
               " and listening for connections?")
1846
  elif isinstance(err, luxi.TimeoutError):
1847
    obuf.write("Timeout while talking to the master daemon. Error:\n"
1848
               "%s" % msg)
1849
  elif isinstance(err, luxi.PermissionError):
1850
    obuf.write("It seems you don't have permissions to connect to the"
1851
               " master daemon.\nPlease retry as a different user.")
1852
  elif isinstance(err, luxi.ProtocolError):
1853
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1854
               "%s" % msg)
1855
  elif isinstance(err, errors.JobLost):
1856
    obuf.write("Error checking job status: %s" % msg)
1857
  elif isinstance(err, errors.GenericError):
1858
    obuf.write("Unhandled Ganeti error: %s" % msg)
1859
  elif isinstance(err, JobSubmittedException):
1860
    obuf.write("JobID: %s\n" % err.args[0])
1861
    retcode = 0
1862
  else:
1863
    obuf.write("Unhandled exception: %s" % msg)
1864
  return retcode, obuf.getvalue().rstrip('\n')
1865

    
1866

    
1867
def GenericMain(commands, override=None, aliases=None):
1868
  """Generic main function for all the gnt-* commands.
1869

1870
  Arguments:
1871
    - commands: a dictionary with a special structure, see the design doc
1872
                for command line handling.
1873
    - override: if not None, we expect a dictionary with keys that will
1874
                override command line options; this can be used to pass
1875
                options from the scripts to generic functions
1876
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1877

1878
  """
1879
  # save the program name and the entire command line for later logging
1880
  if sys.argv:
1881
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1882
    if len(sys.argv) >= 2:
1883
      binary += " " + sys.argv[1]
1884
      old_cmdline = " ".join(sys.argv[2:])
1885
    else:
1886
      old_cmdline = ""
1887
  else:
1888
    binary = "<unknown program>"
1889
    old_cmdline = ""
1890

    
1891
  if aliases is None:
1892
    aliases = {}
1893

    
1894
  try:
1895
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1896
  except errors.ParameterError, err:
1897
    result, err_msg = FormatError(err)
1898
    ToStderr(err_msg)
1899
    return 1
1900

    
1901
  if func is None: # parse error
1902
    return 1
1903

    
1904
  if override is not None:
1905
    for key, val in override.iteritems():
1906
      setattr(options, key, val)
1907

    
1908
  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
1909
                     stderr_logging=True, program=binary)
1910

    
1911
  if old_cmdline:
1912
    logging.info("run with arguments '%s'", old_cmdline)
1913
  else:
1914
    logging.info("run with no arguments")
1915

    
1916
  try:
1917
    result = func(options, args)
1918
  except (errors.GenericError, luxi.ProtocolError,
1919
          JobSubmittedException), err:
1920
    result, err_msg = FormatError(err)
1921
    logging.exception("Error during command processing")
1922
    ToStderr(err_msg)
1923

    
1924
  return result
1925

    
1926

    
1927
def ParseNicOption(optvalue):
1928
  """Parses the value of the --net option(s).
1929

1930
  """
1931
  try:
1932
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1933
  except (TypeError, ValueError), err:
1934
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1935

    
1936
  nics = [{}] * nic_max
1937
  for nidx, ndict in optvalue:
1938
    nidx = int(nidx)
1939

    
1940
    if not isinstance(ndict, dict):
1941
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1942
                                 " got %s" % (nidx, ndict))
1943

    
1944
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1945

    
1946
    nics[nidx] = ndict
1947

    
1948
  return nics
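
# Illustrative sketch, not part of the original module: the input/output
# shape of ParseNicOption; the link and MAC values are placeholders.
def _ExampleParseNicOption():
  """Shows how indexed --net values become a dense list of NIC dicts.

  """
  # as produced by e.g. "--net 0:link=br0 --net 2:mac=aa:00:00:35:d9:0c"
  optvalue = [("0", {"link": "br0"}), ("2", {"mac": "aa:00:00:35:d9:0c"})]
  nics = ParseNicOption(optvalue)
  # expected shape: [{"link": "br0"}, {}, {"mac": "aa:00:00:35:d9:0c"}],
  # i.e. indices that were not given become empty ("auto") NIC dicts
  return nics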
1949

    
1950

    
1951
def GenericInstanceCreate(mode, opts, args):
1952
  """Add an instance to the cluster via either creation or import.
1953

1954
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1955
  @param opts: the command line options selected by the user
1956
  @type args: list
1957
  @param args: should contain only one element, the new instance name
1958
  @rtype: int
1959
  @return: the desired exit code
1960

1961
  """
1962
  instance = args[0]
1963

    
1964
  (pnode, snode) = SplitNodeOption(opts.node)
1965

    
1966
  hypervisor = None
1967
  hvparams = {}
1968
  if opts.hypervisor:
1969
    hypervisor, hvparams = opts.hypervisor
1970

    
1971
  if opts.nics:
1972
    nics = ParseNicOption(opts.nics)
1973
  elif opts.no_nics:
1974
    # no nics
1975
    nics = []
1976
  elif mode == constants.INSTANCE_CREATE:
1977
    # default of one nic, all auto
1978
    nics = [{}]
1979
  else:
1980
    # mode == import
1981
    nics = []
1982

    
1983
  if opts.disk_template == constants.DT_DISKLESS:
1984
    if opts.disks or opts.sd_size is not None:
1985
      raise errors.OpPrereqError("Diskless instance but disk"
1986
                                 " information passed")
1987
    disks = []
1988
  else:
1989
    if (not opts.disks and not opts.sd_size
1990
        and mode == constants.INSTANCE_CREATE):
1991
      raise errors.OpPrereqError("No disk information specified")
1992
    if opts.disks and opts.sd_size is not None:
1993
      raise errors.OpPrereqError("Please use either the '--disk' or"
1994
                                 " '-s' option")
1995
    if opts.sd_size is not None:
1996
      opts.disks = [(0, {"size": opts.sd_size})]
1997

    
1998
    if opts.disks:
1999
      try:
2000
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2001
      except ValueError, err:
2002
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2003
      disks = [{}] * disk_max
2004
    else:
2005
      disks = []
2006
    for didx, ddict in opts.disks:
2007
      didx = int(didx)
2008
      if not isinstance(ddict, dict):
2009
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2010
        raise errors.OpPrereqError(msg)
2011
      elif "size" in ddict:
2012
        if "adopt" in ddict:
2013
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2014
                                     " (disk %d)" % didx)
2015
        try:
2016
          ddict["size"] = utils.ParseUnit(ddict["size"])
2017
        except ValueError, err:
2018
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2019
                                     (didx, err))
2020
      elif "adopt" in ddict:
2021
        if mode == constants.INSTANCE_IMPORT:
2022
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2023
                                     " import")
2024
        ddict["size"] = 0
2025
      else:
2026
        raise errors.OpPrereqError("Missing size or adoption source for"
2027
                                   " disk %d" % didx)
2028
      disks[didx] = ddict
2029

    
2030
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2031
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2032

    
2033
  if mode == constants.INSTANCE_CREATE:
2034
    start = opts.start
2035
    os_type = opts.os
2036
    force_variant = opts.force_variant
2037
    src_node = None
2038
    src_path = None
2039
    no_install = opts.no_install
2040
    identify_defaults = False
2041
  elif mode == constants.INSTANCE_IMPORT:
2042
    start = False
2043
    os_type = None
2044
    force_variant = False
2045
    src_node = opts.src_node
2046
    src_path = opts.src_dir
2047
    no_install = None
2048
    identify_defaults = opts.identify_defaults
2049
  else:
2050
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2051

    
2052
  op = opcodes.OpCreateInstance(instance_name=instance,
2053
                                disks=disks,
2054
                                disk_template=opts.disk_template,
2055
                                nics=nics,
2056
                                pnode=pnode, snode=snode,
2057
                                ip_check=opts.ip_check,
2058
                                name_check=opts.name_check,
2059
                                wait_for_sync=opts.wait_for_sync,
2060
                                file_storage_dir=opts.file_storage_dir,
2061
                                file_driver=opts.file_driver,
2062
                                iallocator=opts.iallocator,
2063
                                hypervisor=hypervisor,
2064
                                hvparams=hvparams,
2065
                                beparams=opts.beparams,
2066
                                osparams=opts.osparams,
2067
                                mode=mode,
2068
                                start=start,
2069
                                os_type=os_type,
2070
                                force_variant=force_variant,
2071
                                src_node=src_node,
2072
                                src_path=src_path,
2073
                                no_install=no_install,
2074
                                identify_defaults=identify_defaults)
2075

    
2076
  SubmitOrSend(op, opts)
2077
  return 0
2078

    
2079

    
2080
class _RunWhileClusterStoppedHelper:
2081
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2082

2083
  """
2084
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2085
    """Initializes this class.
2086

2087
    @type feedback_fn: callable
2088
    @param feedback_fn: Feedback function
2089
    @type cluster_name: string
2090
    @param cluster_name: Cluster name
2091
    @type master_node: string
2092
    @param master_node: Master node name
2093
    @type online_nodes: list
2094
    @param online_nodes: List of names of online nodes
2095

2096
    """
2097
    self.feedback_fn = feedback_fn
2098
    self.cluster_name = cluster_name
2099
    self.master_node = master_node
2100
    self.online_nodes = online_nodes
2101

    
2102
    self.ssh = ssh.SshRunner(self.cluster_name)
2103

    
2104
    self.nonmaster_nodes = [name for name in online_nodes
2105
                            if name != master_node]
2106

    
2107
    assert self.master_node not in self.nonmaster_nodes
2108

    
2109
  def _RunCmd(self, node_name, cmd):
2110
    """Runs a command on the local or a remote machine.
2111

2112
    @type node_name: string
2113
    @param node_name: Machine name
2114
    @type cmd: list
2115
    @param cmd: Command
2116

2117
    """
2118
    if node_name is None or node_name == self.master_node:
2119
      # No need to use SSH
2120
      result = utils.RunCmd(cmd)
2121
    else:
2122
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2123

    
2124
    if result.failed:
2125
      errmsg = ["Failed to run command %s" % result.cmd]
2126
      if node_name:
2127
        errmsg.append("on node %s" % node_name)
2128
      errmsg.append(": exitcode %s and error %s" %
2129
                    (result.exit_code, result.output))
2130
      raise errors.OpExecError(" ".join(errmsg))
2131

    
2132
  def Call(self, fn, *args):
2133
    """Call function while all daemons are stopped.
2134

2135
    @type fn: callable
2136
    @param fn: Function to be called
2137

2138
    """
2139
    # Pause watcher by acquiring an exclusive lock on watcher state file
2140
    self.feedback_fn("Blocking watcher")
2141
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2142
    try:
2143
      # TODO: Currently, this just blocks. There's no timeout.
2144
      # TODO: Should it be a shared lock?
2145
      watcher_block.Exclusive(blocking=True)
2146

    
2147
      # Stop master daemons, so that no new jobs can come in and all running
2148
      # ones are finished
2149
      self.feedback_fn("Stopping master daemons")
2150
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2151
      try:
2152
        # Stop daemons on all nodes
2153
        for node_name in self.online_nodes:
2154
          self.feedback_fn("Stopping daemons on %s" % node_name)
2155
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2156

    
2157
        # All daemons are shut down now
2158
        try:
2159
          return fn(self, *args)
2160
        except Exception, err:
2161
          _, errmsg = FormatError(err)
2162
          logging.exception("Caught exception")
2163
          self.feedback_fn(errmsg)
2164
          raise
2165
      finally:
2166
        # Start cluster again, master node last
2167
        for node_name in self.nonmaster_nodes + [self.master_node]:
2168
          self.feedback_fn("Starting daemons on %s" % node_name)
2169
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2170
    finally:
2171
      # Resume watcher
2172
      watcher_block.Close()
2173

    
2174

    
2175
def RunWhileClusterStopped(feedback_fn, fn, *args):
2176
  """Calls a function while all cluster daemons are stopped.
2177

2178
  @type feedback_fn: callable
2179
  @param feedback_fn: Feedback function
2180
  @type fn: callable
2181
  @param fn: Function to be called when daemons are stopped
2182

2183
  """
2184
  feedback_fn("Gathering cluster information")
2185

    
2186
  # This ensures we're running on the master daemon
2187
  cl = GetClient()
2188

    
2189
  (cluster_name, master_node) = \
2190
    cl.QueryConfigValues(["cluster_name", "master_node"])
2191

    
2192
  online_nodes = GetOnlineNodes([], cl=cl)
2193

    
2194
  # Don't keep a reference to the client. The master daemon will go away.
2195
  del cl
2196

    
2197
  assert master_node in online_nodes
2198

    
2199
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2200
                                       online_nodes).Call(fn, *args)
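
# Illustrative sketch, not part of the original module: the expected callback
# signature for RunWhileClusterStopped; the callback body is made up.
def _ExampleRunWhileClusterStopped():
  """Runs a dummy maintenance callback with all daemons stopped.

  """
  def _Maintenance(helper):
    # "helper" is the _RunWhileClusterStoppedHelper instance; any extra
    # *args passed to RunWhileClusterStopped would follow it
    helper.feedback_fn("Cluster is stopped, doing maintenance work")

  return RunWhileClusterStopped(ToStdout, _Maintenance)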
2201

    
2202

    
2203
def GenerateTable(headers, fields, separator, data,
2204
                  numfields=None, unitfields=None,
2205
                  units=None):
2206
  """Prints a table with headers and different fields.
2207

2208
  @type headers: dict
2209
  @param headers: dictionary mapping field names to headers for
2210
      the table
2211
  @type fields: list
2212
  @param fields: the field names corresponding to each row in
2213
      the data field
2214
  @param separator: the separator to be used; if this is None,
2215
      the default 'smart' algorithm is used which computes optimal
2216
      field width, otherwise just the separator is used between
2217
      each field
2218
  @type data: list
2219
  @param data: a list of lists, each sublist being one row to be output
2220
  @type numfields: list
2221
  @param numfields: a list with the fields that hold numeric
2222
      values and thus should be right-aligned
2223
  @type unitfields: list
2224
  @param unitfields: a list with the fields that hold numeric
2225
      values that should be formatted with the units field
2226
  @type units: string or None
2227
  @param units: the units we should use for formatting, or None for
2228
      automatic choice (human-readable for non-separator usage, otherwise
2229
      megabytes); this is a one-letter string
2230

2231
  """
2232
  if units is None:
2233
    if separator:
2234
      units = "m"
2235
    else:
2236
      units = "h"
2237

    
2238
  if numfields is None:
2239
    numfields = []
2240
  if unitfields is None:
2241
    unitfields = []
2242

    
2243
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2244
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2245

    
2246
  format_fields = []
2247
  for field in fields:
2248
    if headers and field not in headers:
2249
      # TODO: handle better unknown fields (either revert to old
2250
      # style of raising exception, or deal more intelligently with
2251
      # variable fields)
2252
      headers[field] = field
2253
    if separator is not None:
2254
      format_fields.append("%s")
2255
    elif numfields.Matches(field):
2256
      format_fields.append("%*s")
2257
    else:
2258
      format_fields.append("%-*s")
2259

    
2260
  if separator is None:
2261
    mlens = [0 for name in fields]
2262
    format_str = ' '.join(format_fields)
2263
  else:
2264
    format_str = separator.replace("%", "%%").join(format_fields)
2265

    
2266
  for row in data:
2267
    if row is None:
2268
      continue
2269
    for idx, val in enumerate(row):
2270
      if unitfields.Matches(fields[idx]):
2271
        try:
2272
          val = int(val)
2273
        except (TypeError, ValueError):
2274
          pass
2275
        else:
2276
          val = row[idx] = utils.FormatUnit(val, units)
2277
      val = row[idx] = str(val)
2278
      if separator is None:
2279
        mlens[idx] = max(mlens[idx], len(val))
2280

    
2281
  result = []
2282
  if headers:
2283
    args = []
2284
    for idx, name in enumerate(fields):
2285
      hdr = headers[name]
2286
      if separator is None:
2287
        mlens[idx] = max(mlens[idx], len(hdr))
2288
        args.append(mlens[idx])
2289
      args.append(hdr)
2290
    result.append(format_str % tuple(args))
2291

    
2292
  if separator is None:
2293
    assert len(mlens) == len(fields)
2294

    
2295
    if fields and not numfields.Matches(fields[-1]):
2296
      mlens[-1] = 0
2297

    
2298
  for line in data:
2299
    args = []
2300
    if line is None:
2301
      line = ['-' for _ in fields]
2302
    for idx in range(len(fields)):
2303
      if separator is None:
2304
        args.append(mlens[idx])
2305
      args.append(line[idx])
2306
    result.append(format_str % tuple(args))
2307

    
2308
  return result
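
# Illustrative sketch, not part of the original module: building a small
# table the way the older list commands do; the field names, headers and
# row data are made up.
def _ExampleGenerateTable():
  """Shows the headers/fields/data wiring for GenerateTable.

  """
  headers = {"name": "Node", "dfree": "DFree"}
  fields = ["name", "dfree"]
  data = [["node1.example.com", 17408], ["node2.example.com", 2048]]
  # separator=None selects the "smart" column-width algorithm; dfree is
  # right-aligned and formatted with units
  for line in GenerateTable(headers, fields, None, data,
                            numfields=["dfree"], unitfields=["dfree"]):
    ToStdout(line)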
2309

    
2310

    
2311
def _FormatBool(value):
2312
  """Formats a boolean value as a string.
2313

2314
  """
2315
  if value:
2316
    return "Y"
2317
  return "N"
2318

    
2319

    
2320
#: Default formatting for query results; (callback, align right)
2321
_DEFAULT_FORMAT_QUERY = {
2322
  constants.QFT_TEXT: (str, False),
2323
  constants.QFT_BOOL: (_FormatBool, False),
2324
  constants.QFT_NUMBER: (str, True),
2325
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2326
  constants.QFT_OTHER: (str, False),
2327
  constants.QFT_UNKNOWN: (str, False),
2328
  }
2329

    
2330

    
2331
def _GetColumnFormatter(fdef, override, unit):
2332
  """Returns formatting function for a field.
2333

2334
  @type fdef: L{objects.QueryFieldDefinition}
2335
  @type override: dict
2336
  @param override: Dictionary for overriding field formatting functions,
2337
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2338
  @type unit: string
2339
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2340
  @rtype: tuple; (callable, bool)
2341
  @return: Returns the function to format a value (takes one parameter) and a
2342
    boolean for aligning the value on the right-hand side
2343

2344
  """
2345
  fmt = override.get(fdef.name, None)
2346
  if fmt is not None:
2347
    return fmt
2348

    
2349
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2350

    
2351
  if fdef.kind == constants.QFT_UNIT:
2352
    # Can't keep this information in the static dictionary
2353
    return (lambda value: utils.FormatUnit(value, unit), True)
2354

    
2355
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2356
  if fmt is not None:
2357
    return fmt
2358

    
2359
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2360

    
2361

    
2362
class _QueryColumnFormatter:
2363
  """Callable class for formatting fields of a query.
2364

2365
  """
2366
  def __init__(self, fn, status_fn):
2367
    """Initializes this class.
2368

2369
    @type fn: callable
2370
    @param fn: Formatting function
2371
    @type status_fn: callable
2372
    @param status_fn: Function to report fields' status
2373

2374
    """
2375
    self._fn = fn
2376
    self._status_fn = status_fn
2377

    
2378
  def __call__(self, data):
2379
    """Returns a field's string representation.
2380

2381
    """
2382
    (status, value) = data
2383

    
2384
    # Report status
2385
    self._status_fn(status)
2386

    
2387
    if status == constants.QRFS_NORMAL:
2388
      return self._fn(value)
2389

    
2390
    assert value is None, \
2391
           "Found value %r for abnormal status %s" % (value, status)
2392

    
2393
    if status == constants.QRFS_UNKNOWN:
2394
      return "<unknown>"
2395

    
2396
    if status == constants.QRFS_NODATA:
2397
      return "<nodata>"
2398

    
2399
    if status == constants.QRFS_UNAVAIL:
2400
      return "<unavail>"
2401

    
2402
    raise NotImplementedError("Unknown status %s" % status)
2403

    
2404

    
2405
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2406
                      header=False):
2407
  """Formats data in L{objects.QueryResponse}.
2408

2409
  @type result: L{objects.QueryResponse}
2410
  @param result: result of query operation
2411
  @type unit: string
2412
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2413
    see L{utils.FormatUnit}
2414
  @type format_override: dict
2415
  @param format_override: Dictionary for overriding field formatting functions,
2416
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2417
  @type separator: string or None
2418
  @param separator: String used to separate fields
2419
  @type header: bool
2420
  @param header: Whether to output header row
2421

2422
  """
2423
  if unit is None:
2424
    if separator:
2425
      unit = "m"
2426
    else:
2427
      unit = "h"
2428

    
2429
  if format_override is None:
2430
    format_override = {}
2431

    
2432
  stats = dict.fromkeys(constants.QRFS_ALL, 0)
2433

    
2434
  def _RecordStatus(status):
2435
    if status in stats:
2436
      stats[status] += 1
2437

    
2438
  columns = []
2439
  for fdef in result.fields:
2440
    assert fdef.title and fdef.name
2441
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2442
    columns.append(TableColumn(fdef.title,
2443
                               _QueryColumnFormatter(fn, _RecordStatus),
2444
                               align_right))
2445

    
2446
  table = FormatTable(result.data, columns, header, separator)
2447

    
2448
  # Collect statistics
2449
  assert len(stats) == len(constants.QRFS_ALL)
2450
  assert compat.all(count >= 0 for count in stats.values())
2451

    
2452
  # Determine overall status. If there was no data, unknown fields must be
2453
  # detected via the field definitions.
2454
  if (stats[constants.QRFS_UNKNOWN] or
2455
      (not result.data and _GetUnknownFields(result.fields))):
2456
    status = QR_UNKNOWN
2457
  elif compat.any(count > 0 for key, count in stats.items()
2458
                  if key != constants.QRFS_NORMAL):
2459
    status = QR_INCOMPLETE
2460
  else:
2461
    status = QR_NORMAL
2462

    
2463
  return (status, table)
2464

    
2465

    
2466
def _GetUnknownFields(fdefs):
2467
  """Returns list of unknown fields included in C{fdefs}.
2468

2469
  @type fdefs: list of L{objects.QueryFieldDefinition}
2470

2471
  """
2472
  return [fdef for fdef in fdefs
2473
          if fdef.kind == constants.QFT_UNKNOWN]
2474

    
2475

    
2476
def _WarnUnknownFields(fdefs):
2477
  """Prints a warning to stderr if a query included unknown fields.
2478

2479
  @type fdefs: list of L{objects.QueryFieldDefinition}
2480

2481
  """
2482
  unknown = _GetUnknownFields(fdefs)
2483
  if unknown:
2484
    ToStderr("Warning: Queried for unknown fields %s",
2485
             utils.CommaJoin(fdef.name for fdef in unknown))
2486
    return True
2487

    
2488
  return False
2489

    
2490

    
2491
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2492
                format_override=None):
2493
  """Generic implementation for listing all items of a resource.
2494

2495
  @param resource: One of L{constants.QR_OP_LUXI}
2496
  @type fields: list of strings
2497
  @param fields: List of fields to query for
2498
  @type names: list of strings
2499
  @param names: Names of items to query for
2500
  @type unit: string or None
2501
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2502
    None for automatic choice (human-readable for non-separator usage,
2503
    otherwise megabytes); this is a one-letter string
2504
  @type separator: string or None
2505
  @param separator: String used to separate fields
2506
  @type header: bool
2507
  @param header: Whether to show header row
2508
  @type format_override: dict
2509
  @param format_override: Dictionary for overriding field formatting functions,
2510
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2511

2512
  """
2513
  if cl is None:
2514
    cl = GetClient()
2515

    
2516
  if not names:
2517
    names = None
2518

    
2519
  response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
2520

    
2521
  found_unknown = _WarnUnknownFields(response.fields)
2522

    
2523
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2524
                                     header=header,
2525
                                     format_override=format_override)
2526

    
2527
  for line in data:
2528
    ToStdout(line)
2529

    
2530
  assert ((found_unknown and status == QR_UNKNOWN) or
2531
          (not found_unknown and status != QR_UNKNOWN))
2532

    
2533
  if status == QR_UNKNOWN:
2534
    return constants.EXIT_UNKNOWN_FIELD
2535

    
2536
  # TODO: Should the list command fail if not all data could be collected?
2537
  return constants.EXIT_SUCCESS
2538

    
2539

    
2540
def GenericListFields(resource, fields, separator, header, cl=None):
2541
  """Generic implementation for listing fields for a resource.
2542

2543
  @param resource: One of L{constants.QR_OP_LUXI}
2544
  @type fields: list of strings
2545
  @param fields: List of fields to query for
2546
  @type separator: string or None
2547
  @param separator: String used to separate fields
2548
  @type header: bool
2549
  @param header: Whether to show header row
2550

2551
  """
2552
  if cl is None:
2553
    cl = GetClient()
2554

    
2555
  if not fields:
2556
    fields = None
2557

    
2558
  response = cl.QueryFields(resource, fields)
2559

    
2560
  found_unknown = _WarnUnknownFields(response.fields)
2561

    
2562
  columns = [
2563
    TableColumn("Name", str, False),
2564
    TableColumn("Title", str, False),
2565
    # TODO: Add field description to master daemon
2566
    ]
2567

    
2568
  rows = [[fdef.name, fdef.title] for fdef in response.fields]
2569

    
2570
  for line in FormatTable(rows, columns, header, separator):
2571
    ToStdout(line)
2572

    
2573
  if found_unknown:
2574
    return constants.EXIT_UNKNOWN_FIELD
2575

    
2576
  return constants.EXIT_SUCCESS
2577

    
2578

    
2579
class TableColumn:
2580
  """Describes a column for L{FormatTable}.
2581

2582
  """
2583
  def __init__(self, title, fn, align_right):
2584
    """Initializes this class.
2585

2586
    @type title: string
2587
    @param title: Column title
2588
    @type fn: callable
2589
    @param fn: Formatting function
2590
    @type align_right: bool
2591
    @param align_right: Whether to align values on the right-hand side
2592

2593
    """
2594
    self.title = title
2595
    self.format = fn
2596
    self.align_right = align_right
2597

    
2598

    
2599
def _GetColFormatString(width, align_right):
2600
  """Returns the format string for a field.
2601

2602
  """
2603
  if align_right:
2604
    sign = ""
2605
  else:
2606
    sign = "-"
2607

    
2608
  return "%%%s%ss" % (sign, width)
2609

    
2610

    
2611
def FormatTable(rows, columns, header, separator):
2612
  """Formats data as a table.
2613

2614
  @type rows: list of lists
2615
  @param rows: Row data, one list per row
2616
  @type columns: list of L{TableColumn}
2617
  @param columns: Column descriptions
2618
  @type header: bool
2619
  @param header: Whether to show header row
2620
  @type separator: string or None
2621
  @param separator: String used to separate columns
2622

2623
  """
2624
  if header:
2625
    data = [[col.title for col in columns]]
2626
    colwidth = [len(col.title) for col in columns]
2627
  else:
2628
    data = []
2629
    colwidth = [0 for _ in columns]
2630

    
2631
  # Format row data
2632
  for row in rows:
2633
    assert len(row) == len(columns)
2634

    
2635
    formatted = [col.format(value) for value, col in zip(row, columns)]
2636

    
2637
    if separator is None:
2638
      # Update column widths
2639
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2640
        # Modifying a list's items while iterating is fine
2641
        colwidth[idx] = max(oldwidth, len(value))
2642

    
2643
    data.append(formatted)
2644

    
2645
  if separator is not None:
2646
    # Return early if a separator is used
2647
    return [separator.join(row) for row in data]
2648

    
2649
  if columns and not columns[-1].align_right:
2650
    # Avoid unnecessary spaces at end of line
2651
    colwidth[-1] = 0
2652

    
2653
  # Build format string
2654
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2655
                  for col, width in zip(columns, colwidth)])
2656

    
2657
  return [fmt % tuple(row) for row in data]
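
# Illustrative sketch, not part of the original module: formatting rows with
# TableColumn/FormatTable as the query-based commands do; the column titles
# and row data are made up.
def _ExampleFormatTable():
  """Shows column descriptions plus row data for FormatTable.

  """
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Memory", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["instance1.example.com", 512], ["instance2.example.com", 4096]]
  for line in FormatTable(rows, columns, True, None):
    ToStdout(line)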
2658

    
2659

    
2660
def FormatTimestamp(ts):
2661
  """Formats a given timestamp.
2662

2663
  @type ts: timestamp
2664
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2665

2666
  @rtype: string
2667
  @return: a string with the formatted timestamp
2668

2669
  """
2670
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2671
    return '?'
2672
  sec, usec = ts
2673
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2674

    
2675

    
2676
def ParseTimespec(value):
2677
  """Parse a time specification.
2678

2679
  The following suffixes will be recognized:
2680

2681
    - s: seconds
2682
    - m: minutes
2683
    - h: hours
2684
    - d: days
2685
    - w: weeks
2686

2687
  Without any suffix, the value will be taken to be in seconds.
2688

2689
  """
2690
  value = str(value)
2691
  if not value:
2692
    raise errors.OpPrereqError("Empty time specification passed")
2693
  suffix_map = {
2694
    's': 1,
2695
    'm': 60,
2696
    'h': 3600,
2697
    'd': 86400,
2698
    'w': 604800,
2699
    }
2700
  if value[-1] not in suffix_map:
2701
    try:
2702
      value = int(value)
2703
    except (TypeError, ValueError):
2704
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2705
  else:
2706
    multiplier = suffix_map[value[-1]]
2707
    value = value[:-1]
2708
    if not value: # no data left after stripping the suffix
2709
      raise errors.OpPrereqError("Invalid time specification (only"
2710
                                 " suffix passed)")
2711
    try:
2712
      value = int(value) * multiplier
2713
    except (TypeError, ValueError):
2714
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2715
  return value
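
# Illustrative sketch, not part of the original module: a few accepted time
# specifications and their values in seconds.
def _ExampleParseTimespec():
  """Shows the suffix handling of ParseTimespec.

  """
  assert ParseTimespec("30") == 30        # no suffix: plain seconds
  assert ParseTimespec("15m") == 15 * 60
  assert ParseTimespec("2h") == 2 * 3600
  assert ParseTimespec("1w") == 7 * 86400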
2716

    
2717

    
2718
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2719
                   filter_master=False):
2720
  """Returns the names of online nodes.
2721

2722
  This function will also log a warning on stderr with the names of
2723
  the offline nodes that are being skipped.
2724

2725
  @param nodes: if not empty, use only this subset of nodes (minus the
2726
      offline ones)
2727
  @param cl: if not None, luxi client to use
2728
  @type nowarn: boolean
2729
  @param nowarn: by default, this function will output a note with the
2730
      offline nodes that are skipped; if this parameter is True the
2731
      note is not displayed
2732
  @type secondary_ips: boolean
2733
  @param secondary_ips: if True, return the secondary IPs instead of the
2734
      names, useful for doing network traffic over the replication interface
2735
      (if any)
2736
  @type filter_master: boolean
2737
  @param filter_master: if True, do not return the master node in the list
2738
      (useful in coordination with secondary_ips where we cannot check our
2739
      node name against the list)
2740

2741
  """
2742
  if cl is None:
2743
    cl = GetClient()
2744

    
2745
  if secondary_ips:
2746
    name_idx = 2
2747
  else:
2748
    name_idx = 0
2749

    
2750
  if filter_master:
2751
    master_node = cl.QueryConfigValues(["master_node"])[0]
2752
    filter_fn = lambda x: x != master_node
2753
  else:
2754
    filter_fn = lambda _: True
2755

    
2756
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2757
                         use_locking=False)
2758
  offline = [row[0] for row in result if row[1]]
2759
  if offline and not nowarn:
2760
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2761
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2762

    
2763

    
2764
def _ToStream(stream, txt, *args):
2765
  """Write a message to a stream, bypassing the logging system
2766

2767
  @type stream: file object
2768
  @param stream: the file to which we should write
2769
  @type txt: str
2770
  @param txt: the message
2771

2772
  """
2773
  if args:
2774
    args = tuple(args)
2775
    stream.write(txt % args)
2776
  else:
2777
    stream.write(txt)
2778
  stream.write('\n')
2779
  stream.flush()
2780

    
2781

    
2782
def ToStdout(txt, *args):
2783
  """Write a message to stdout only, bypassing the logging system
2784

2785
  This is just a wrapper over _ToStream.
2786

2787
  @type txt: str
2788
  @param txt: the message
2789

2790
  """
2791
  _ToStream(sys.stdout, txt, *args)
2792

    
2793

    
2794
def ToStderr(txt, *args):
2795
  """Write a message to stderr only, bypassing the logging system
2796

2797
  This is just a wrapper over _ToStream.
2798

2799
  @type txt: str
2800
  @param txt: the message
2801

2802
  """
2803
  _ToStream(sys.stderr, txt, *args)
2804

    
2805

    
2806
class JobExecutor(object):
2807
  """Class which manages the submission and execution of multiple jobs.
2808

2809
  Note that instances of this class should not be reused between
2810
  GetResults() calls.
2811

2812
  """
2813
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2814
    self.queue = []
2815
    if cl is None:
2816
      cl = GetClient()
2817
    self.cl = cl
2818
    self.verbose = verbose
2819
    self.jobs = []
2820
    self.opts = opts
2821
    self.feedback_fn = feedback_fn
2822

    
2823
  def QueueJob(self, name, *ops):
2824
    """Record a job for later submit.
2825

2826
    @type name: string
2827
    @param name: a description of the job, will be used in GetResults
2828
    """
2829
    SetGenericOpcodeOpts(ops, self.opts)
2830
    self.queue.append((name, ops))
2831

    
2832
  def SubmitPending(self, each=False):
2833
    """Submit all pending jobs.
2834

2835
    """
2836
    if each:
2837
      results = []
2838
      for row in self.queue:
2839
        # SubmitJob will remove the success status, but raise an exception if
2840
        # the submission fails, so we'll notice that anyway.
2841
        results.append([True, self.cl.SubmitJob(row[1])])
2842
    else:
2843
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2844
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2845
                                                            self.queue)):
2846
      self.jobs.append((idx, status, data, name))
2847

    
2848
  def _ChooseJob(self):
2849
    """Choose a non-waiting/queued job to poll next.
2850

2851
    """
2852
    assert self.jobs, "_ChooseJob called with empty job list"
2853

    
2854
    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2855
    assert result
2856

    
2857
    for job_data, status in zip(self.jobs, result):
2858
      if (isinstance(status, list) and status and
2859
          status[0] in (constants.JOB_STATUS_QUEUED,
2860
                        constants.JOB_STATUS_WAITLOCK,
2861
                        constants.JOB_STATUS_CANCELING)):
2862
        # job is still present and waiting
2863
        continue
2864
      # good candidate found (either running job or lost job)
2865
      self.jobs.remove(job_data)
2866
      return job_data
2867

    
2868
    # no job found
2869
    return self.jobs.pop(0)
2870

    
2871
  def GetResults(self):
2872
    """Wait for and return the results of all jobs.
2873

2874
    @rtype: list
2875
    @return: list of tuples (success, job results), in the same order
2876
        as the submitted jobs; if a job has failed, instead of the result
2877
        there will be the error message
2878

2879
    """
2880
    if not self.jobs:
2881
      self.SubmitPending()
2882
    results = []
2883
    if self.verbose:
2884
      ok_jobs = [row[2] for row in self.jobs if row[1]]
2885
      if ok_jobs:
2886
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2887

    
2888
    # first, remove any non-submitted jobs
2889
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2890
    for idx, _, jid, name in failures:
2891
      ToStderr("Failed to submit job for %s: %s", name, jid)
2892
      results.append((idx, False, jid))
2893

    
2894
    while self.jobs:
2895
      (idx, _, jid, name) = self._ChooseJob()
2896
      ToStdout("Waiting for job %s for %s...", jid, name)
2897
      try:
2898
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2899
        success = True
2900
      except errors.JobLost, err:
2901
        _, job_result = FormatError(err)
2902
        ToStderr("Job %s for %s has been archived, cannot check its result",
2903
                 jid, name)
2904
        success = False
2905
      except (errors.GenericError, luxi.ProtocolError), err:
2906
        _, job_result = FormatError(err)
2907
        success = False
2908
        # the error message will always be shown, verbose or not
2909
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2910

    
2911
      results.append((idx, success, job_result))
2912

    
2913
    # sort based on the index, then drop it
2914
    results.sort()
2915
    results = [i[1:] for i in results]
2916

    
2917
    return results
2918

    
2919
  def WaitOrShow(self, wait):
2920
    """Wait for job results or only print the job IDs.
2921

2922
    @type wait: boolean
2923
    @param wait: whether to wait or not
2924

2925
    """
2926
    if wait:
2927
      return self.GetResults()
2928
    else:
2929
      if not self.jobs:
2930
        self.SubmitPending()
2931
      for _, status, result, name in self.jobs:
2932
        if status:
2933
          ToStdout("%s: %s", result, name)
2934
        else:
2935
          ToStderr("Failure for %s: %s", name, result)
2936
      return [row[1:3] for row in self.jobs]
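
# Illustrative sketch, not part of the original module: batching several jobs
# through JobExecutor; "instance_ops" is an assumed list of (name, opcode)
# pairs prepared by the caller.
def _ExampleJobExecutor(opts, instance_ops):
  """Shows the QueueJob/GetResults cycle of JobExecutor.

  """
  jex = JobExecutor(opts=opts)
  for (name, op) in instance_ops:
    jex.QueueJob(name, op)
  # submits everything at once, then waits and reports per-job results
  results = jex.GetResults()
  return compat.all(success for (success, _) in results)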