#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
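# Illustrative note (not part of the original source): a command whose
# argument definition is ARGS_ONE_NODE accepts exactly one node name, while
# e.g. [ArgInstance(min=1)] would accept one or more instance names; the
# min/max bounds are enforced by _CheckArguments() below.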


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind == constants.TAG_NODE or kind == constants.TAG_INSTANCE:
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)
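
# Usage sketch for the tag helpers above (illustrative only, hypothetical
# values): a "remove-tags" command would call RemoveTags() with
# opts.tag_type == constants.TAG_INSTANCE and
# args == ["instance1.example.com", "mytag"]; _ExtractTagsObject() pops the
# instance name and the remaining arguments are submitted as tags to remove.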


def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParse's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
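
# Illustrative example for check_unit(), assuming the usual MiB-based
# semantics of utils.ParseUnit(): a value of "4G" converts to 4096, while a
# plain "512" stays 512 (MiB).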


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
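
# Illustrative example for _SplitKeyVal() (not from the original source):
#   _SplitKeyVal("--backend", "mem=512,no_auto_balance,-serial_console")
# returns
#   {"mem": "512", "auto_balance": False, "serial_console": None}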


def check_ident_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ''
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
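
# Illustrative examples for check_ident_key_val() (not from the original
# source): "disk:size=10G,mode=rw" is parsed into
# ("disk", {"size": "10G", "mode": "rw"}), while "no_disk" yields
# ("disk", False), i.e. a request to remove the parameter group.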


def check_key_val(option, opt, value):  # pylint: disable-msg=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggest is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
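# Illustrative sketch (hypothetical option, not from the original source):
# options built with cli_option() can use the custom types registered above;
# e.g. a "keyval" option such as
#   opt = cli_option("-B", "--backend-parameters", dest="beparams",
#                    type="keyval", default={})
# parses "-B mem=512,auto_balance" into
#   opts.beparams == {"mem": "512", "auto_balance": True}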


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=('h', 'm', 'g', 't'),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help="Custom disk setup (diskless, file,"
                               " plain or drbd)",
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT = cli_option("-g", "--node-group",
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                            " node (only for the drbd template)")

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                              " node (only for the drbd template)")

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                              " (only for the drbd template)")

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
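
# Illustrative sketch of the "commands" dictionary expected by _ParseArgs()
# (hypothetical entry, not from the original source): each value is a tuple
# (function, argument definition, option list, usage string, description):
#   commands = {
#     "info": (ShowInfo, ARGS_MANY_INSTANCES, [NOHDR_OPT],
#              "[<instance>...]", "Show information about instances"),
#     }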
1295

    
1296

    
1297
def _CheckArguments(cmd, args_def, args):
1298
  """Verifies the arguments using the argument definition.
1299

1300
  Algorithm:
1301

1302
    1. Abort with error if values specified by user but none expected.
1303

1304
    1. For each argument in definition
1305

1306
      1. Keep running count of minimum number of values (min_count)
1307
      1. Keep running count of maximum number of values (max_count)
1308
      1. If it has an unlimited number of values
1309

1310
        1. Abort with error if it's not the last argument in the definition
1311

1312
    1. If last argument has limited number of values
1313

1314
      1. Abort with error if number of values doesn't match or is too large
1315

1316
    1. Abort with error if user didn't pass enough values (min_count)
1317

1318
  """
1319
  if args and not args_def:
1320
    ToStderr("Error: Command %s expects no arguments", cmd)
1321
    return False
1322

    
1323
  min_count = None
1324
  max_count = None
1325
  check_max = None
1326

    
1327
  last_idx = len(args_def) - 1
1328

    
1329
  for idx, arg in enumerate(args_def):
1330
    if min_count is None:
1331
      min_count = arg.min
1332
    elif arg.min is not None:
1333
      min_count += arg.min
1334

    
1335
    if max_count is None:
1336
      max_count = arg.max
1337
    elif arg.max is not None:
1338
      max_count += arg.max
1339

    
1340
    if idx == last_idx:
1341
      check_max = (arg.max is not None)
1342

    
1343
    elif arg.max is None:
1344
      raise errors.ProgrammerError("Only the last argument can have max=None")
1345

    
1346
  if check_max:
1347
    # Command with exact number of arguments
1348
    if (min_count is not None and max_count is not None and
1349
        min_count == max_count and len(args) != min_count):
1350
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1351
      return False
1352

    
1353
    # Command with limited number of arguments
1354
    if max_count is not None and len(args) > max_count:
1355
      ToStderr("Error: Command %s expects only %d argument(s)",
1356
               cmd, max_count)
1357
      return False
1358

    
1359
  # Command with some required arguments
1360
  if min_count is not None and len(args) < min_count:
1361
    ToStderr("Error: Command %s expects at least %d argument(s)",
1362
             cmd, min_count)
1363
    return False
1364

    
1365
  return True
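# Illustrative sketch (added for clarity; not part of the original code):
# _CheckArguments consumes argument-definition objects exposing ".min" and
# ".max" (the Arg* classes defined earlier in this module); "SomeArg" below is
# a hypothetical stand-in.  For a definition like
#   args_def = [SomeArg(min=1, max=1), SomeArg(min=0, max=None)]
# at least one value is required, any number of extra values is accepted, and
# only the last entry may use max=None.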
1366

    
1367

    
1368
def SplitNodeOption(value):
1369
  """Splits the value of a --node option.
1370

1371
  """
1372
  if value and ':' in value:
1373
    return value.split(':', 1)
1374
  else:
1375
    return (value, None)
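# Illustrative examples (added for clarity; not part of the original code):
#   (pnode, snode) = SplitNodeOption("node1.example.com:node2.example.com")
#     # pnode == "node1.example.com", snode == "node2.example.com"
#   SplitNodeOption("node1.example.com")  ->  ("node1.example.com", None)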
1376

    
1377

    
1378
def CalculateOSNames(os_name, os_variants):
1379
  """Calculates all the names an OS can be called, according to its variants.
1380

1381
  @type os_name: string
1382
  @param os_name: base name of the os
1383
  @type os_variants: list or None
1384
  @param os_variants: list of supported variants
1385
  @rtype: list
1386
  @return: list of valid names
1387

1388
  """
1389
  if os_variants:
1390
    return ['%s+%s' % (os_name, v) for v in os_variants]
1391
  else:
1392
    return [os_name]
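# Illustrative examples (added for clarity; OS/variant names are made up):
#   CalculateOSNames("debootstrap", ["squeeze", "wheezy"])
#     ->  ['debootstrap+squeeze', 'debootstrap+wheezy']
#   CalculateOSNames("debootstrap", None)  ->  ['debootstrap']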
1393

    
1394

    
1395
def ParseFields(selected, default):
1396
  """Parses the values of "--field"-like options.
1397

1398
  @type selected: string or None
1399
  @param selected: User-selected fields, as a comma-separated string
1400
  @type default: list
1401
  @param default: Default fields
1402

1403
  """
1404
  if selected is None:
1405
    return default
1406

    
1407
  if selected.startswith("+"):
1408
    return default + selected[1:].split(",")
1409

    
1410
  return selected.split(",")
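# Illustrative examples (added for clarity; field names are made up), with
# default fields ["name", "status"]:
#   ParseFields(None, ["name", "status"])         -> ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"])  -> ["name", "status", "oper_ram"]
#   ParseFields("name,os", ["name", "status"])    -> ["name", "os"]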
1411

    
1412

    
1413
UsesRPC = rpc.RunWithRPC
1414

    
1415

    
1416
def AskUser(text, choices=None):
1417
  """Ask the user a question.
1418

1419
  @param text: the question to ask
1420

1421
  @param choices: list of tuples (input_char, return_value,
1422
      description); if not given, it will default to: [('y', True,
1423
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1424
      note that the '?' char is reserved for help
1425

1426
  @return: one of the return values from the choices list; if input is
1427
      not possible (i.e. not running with a tty), we return the last
1428
      entry from the list
1429

1430
  """
1431
  if choices is None:
1432
    choices = [('y', True, 'Perform the operation'),
1433
               ('n', False, 'Do not perform the operation')]
1434
  if not choices or not isinstance(choices, list):
1435
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1436
  for entry in choices:
1437
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1438
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1439

    
1440
  answer = choices[-1][1]
1441
  new_text = []
1442
  for line in text.splitlines():
1443
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1444
  text = "\n".join(new_text)
1445
  try:
1446
    f = file("/dev/tty", "a+")
1447
  except IOError:
1448
    return answer
1449
  try:
1450
    chars = [entry[0] for entry in choices]
1451
    chars[-1] = "[%s]" % chars[-1]
1452
    chars.append('?')
1453
    maps = dict([(entry[0], entry[1]) for entry in choices])
1454
    while True:
1455
      f.write(text)
1456
      f.write('\n')
1457
      f.write("/".join(chars))
1458
      f.write(": ")
1459
      line = f.readline(2).strip().lower()
1460
      if line in maps:
1461
        answer = maps[line]
1462
        break
1463
      elif line == '?':
1464
        for entry in choices:
1465
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1466
        f.write("\n")
1467
        continue
1468
  finally:
1469
    f.close()
1470
  return answer
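# Illustrative sketch (added for clarity; prompt text and choices are made
# up): a custom three-answer prompt; the last entry is the default returned
# when no tty is available.
#   choice = AskUser("Remove the instance?",
#                    [("y", True, "Remove"),
#                     ("n", False, "Keep"),
#                     ("a", None, "Abort and decide later")])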
1471

    
1472

    
1473
class JobSubmittedException(Exception):
1474
  """Job was submitted, client should exit.
1475

1476
  This exception has one argument, the ID of the job that was
1477
  submitted. The handler should print this ID.
1478

1479
  This is not an error, just a structured way to exit from clients.
1480

1481
  """
1482

    
1483

    
1484
def SendJob(ops, cl=None):
1485
  """Function to submit an opcode without waiting for the results.
1486

1487
  @type ops: list
1488
  @param ops: list of opcodes
1489
  @type cl: luxi.Client
1490
  @param cl: the luxi client to use for communicating with the master;
1491
             if None, a new client will be created
1492

1493
  """
1494
  if cl is None:
1495
    cl = GetClient()
1496

    
1497
  job_id = cl.SubmitJob(ops)
1498

    
1499
  return job_id
1500

    
1501

    
1502
def GenericPollJob(job_id, cbs, report_cbs):
1503
  """Generic job-polling function.
1504

1505
  @type job_id: number
1506
  @param job_id: Job ID
1507
  @type cbs: Instance of L{JobPollCbBase}
1508
  @param cbs: Data callbacks
1509
  @type report_cbs: Instance of L{JobPollReportCbBase}
1510
  @param report_cbs: Reporting callbacks
1511

1512
  """
1513
  prev_job_info = None
1514
  prev_logmsg_serial = None
1515

    
1516
  status = None
1517

    
1518
  while True:
1519
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1520
                                      prev_logmsg_serial)
1521
    if not result:
1522
      # job not found, go away!
1523
      raise errors.JobLost("Job with id %s lost" % job_id)
1524

    
1525
    if result == constants.JOB_NOTCHANGED:
1526
      report_cbs.ReportNotChanged(job_id, status)
1527

    
1528
      # Wait again
1529
      continue
1530

    
1531
    # Split result, a tuple of (field values, log entries)
1532
    (job_info, log_entries) = result
1533
    (status, ) = job_info
1534

    
1535
    if log_entries:
1536
      for log_entry in log_entries:
1537
        (serial, timestamp, log_type, message) = log_entry
1538
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1539
                                    log_type, message)
1540
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1541

    
1542
    # TODO: Handle canceled and archived jobs
1543
    elif status in (constants.JOB_STATUS_SUCCESS,
1544
                    constants.JOB_STATUS_ERROR,
1545
                    constants.JOB_STATUS_CANCELING,
1546
                    constants.JOB_STATUS_CANCELED):
1547
      break
1548

    
1549
    prev_job_info = job_info
1550

    
1551
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1552
  if not jobs:
1553
    raise errors.JobLost("Job with id %s lost" % job_id)
1554

    
1555
  status, opstatus, result = jobs[0]
1556

    
1557
  if status == constants.JOB_STATUS_SUCCESS:
1558
    return result
1559

    
1560
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1561
    raise errors.OpExecError("Job was canceled")
1562

    
1563
  has_ok = False
1564
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1565
    if status == constants.OP_STATUS_SUCCESS:
1566
      has_ok = True
1567
    elif status == constants.OP_STATUS_ERROR:
1568
      errors.MaybeRaise(msg)
1569

    
1570
      if has_ok:
1571
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1572
                                 (idx, msg))
1573

    
1574
      raise errors.OpExecError(str(msg))
1575

    
1576
  # default failure mode
1577
  raise errors.OpExecError(result)
1578

    
1579

    
1580
class JobPollCbBase:
1581
  """Base class for L{GenericPollJob} callbacks.
1582

1583
  """
1584
  def __init__(self):
1585
    """Initializes this class.
1586

1587
    """
1588

    
1589
  def WaitForJobChangeOnce(self, job_id, fields,
1590
                           prev_job_info, prev_log_serial):
1591
    """Waits for changes on a job.
1592

1593
    """
1594
    raise NotImplementedError()
1595

    
1596
  def QueryJobs(self, job_ids, fields):
1597
    """Returns the selected fields for the selected job IDs.
1598

1599
    @type job_ids: list of numbers
1600
    @param job_ids: Job IDs
1601
    @type fields: list of strings
1602
    @param fields: Fields
1603

1604
    """
1605
    raise NotImplementedError()
1606

    
1607

    
1608
class JobPollReportCbBase:
1609
  """Base class for L{GenericPollJob} reporting callbacks.
1610

1611
  """
1612
  def __init__(self):
1613
    """Initializes this class.
1614

1615
    """
1616

    
1617
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1618
    """Handles a log message.
1619

1620
    """
1621
    raise NotImplementedError()
1622

    
1623
  def ReportNotChanged(self, job_id, status):
1624
    """Called for if a job hasn't changed in a while.
1625

1626
    @type job_id: number
1627
    @param job_id: Job ID
1628
    @type status: string or None
1629
    @param status: Job status if available
1630

1631
    """
1632
    raise NotImplementedError()
1633

    
1634

    
1635
class _LuxiJobPollCb(JobPollCbBase):
1636
  def __init__(self, cl):
1637
    """Initializes this class.
1638

1639
    """
1640
    JobPollCbBase.__init__(self)
1641
    self.cl = cl
1642

    
1643
  def WaitForJobChangeOnce(self, job_id, fields,
1644
                           prev_job_info, prev_log_serial):
1645
    """Waits for changes on a job.
1646

1647
    """
1648
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1649
                                        prev_job_info, prev_log_serial)
1650

    
1651
  def QueryJobs(self, job_ids, fields):
1652
    """Returns the selected fields for the selected job IDs.
1653

1654
    """
1655
    return self.cl.QueryJobs(job_ids, fields)
1656

    
1657

    
1658
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1659
  def __init__(self, feedback_fn):
1660
    """Initializes this class.
1661

1662
    """
1663
    JobPollReportCbBase.__init__(self)
1664

    
1665
    self.feedback_fn = feedback_fn
1666

    
1667
    assert callable(feedback_fn)
1668

    
1669
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1670
    """Handles a log message.
1671

1672
    """
1673
    self.feedback_fn((timestamp, log_type, log_msg))
1674

    
1675
  def ReportNotChanged(self, job_id, status):
1676
    """Called if a job hasn't changed in a while.
1677

1678
    """
1679
    # Ignore
1680

    
1681

    
1682
class StdioJobPollReportCb(JobPollReportCbBase):
1683
  def __init__(self):
1684
    """Initializes this class.
1685

1686
    """
1687
    JobPollReportCbBase.__init__(self)
1688

    
1689
    self.notified_queued = False
1690
    self.notified_waitlock = False
1691

    
1692
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1693
    """Handles a log message.
1694

1695
    """
1696
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1697
             FormatLogMessage(log_type, log_msg))
1698

    
1699
  def ReportNotChanged(self, job_id, status):
1700
    """Called if a job hasn't changed in a while.
1701

1702
    """
1703
    if status is None:
1704
      return
1705

    
1706
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1707
      ToStderr("Job %s is waiting in queue", job_id)
1708
      self.notified_queued = True
1709

    
1710
    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1711
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1712
      self.notified_waitlock = True
1713

    
1714

    
1715
def FormatLogMessage(log_type, log_msg):
1716
  """Formats a job message according to its type.
1717

1718
  """
1719
  if log_type != constants.ELOG_MESSAGE:
1720
    log_msg = str(log_msg)
1721

    
1722
  return utils.SafeEncode(log_msg)
1723

    
1724

    
1725
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1726
  """Function to poll for the result of a job.
1727

1728
  @type job_id: job identifier
1729
  @param job_id: the job to poll for results
1730
  @type cl: luxi.Client
1731
  @param cl: the luxi client to use for communicating with the master;
1732
             if None, a new client will be created
1733

1734
  """
1735
  if cl is None:
1736
    cl = GetClient()
1737

    
1738
  if reporter is None:
1739
    if feedback_fn:
1740
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1741
    else:
1742
      reporter = StdioJobPollReportCb()
1743
  elif feedback_fn:
1744
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1745

    
1746
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
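# Illustrative sketch (added for clarity; assumes an opcode instance "op"
# built by the caller): submit a job without waiting, then poll it later.
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)   # one result entry per opcode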
1747

    
1748

    
1749
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1750
  """Legacy function to submit an opcode.
1751

1752
  This is just a simple wrapper over the construction of the processor
1753
  instance. It should be extended to better handle feedback and
1754
  interaction functions.
1755

1756
  """
1757
  if cl is None:
1758
    cl = GetClient()
1759

    
1760
  SetGenericOpcodeOpts([op], opts)
1761

    
1762
  job_id = SendJob([op], cl=cl)
1763

    
1764
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1765
                       reporter=reporter)
1766

    
1767
  return op_results[0]
1768

    
1769

    
1770
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1771
  """Wrapper around SubmitOpCode or SendJob.
1772

1773
  This function will decide, based on the 'opts' parameter, whether to
1774
  submit and wait for the result of the opcode (and return it), or
1775
  whether to just send the job and print its identifier. It is used in
1776
  order to simplify the implementation of the '--submit' option.
1777

1778
  It will also process the opcodes if we're sending them via SendJob
1779
  (otherwise SubmitOpCode does it).
1780

1781
  """
1782
  if opts and opts.submit_only:
1783
    job = [op]
1784
    SetGenericOpcodeOpts(job, opts)
1785
    job_id = SendJob(job, cl=cl)
1786
    raise JobSubmittedException(job_id)
1787
  else:
1788
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
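# Illustrative note (added for clarity): when "--submit" was given
# (opts.submit_only is set), SubmitOrSend(op, opts) raises
# JobSubmittedException carrying the job ID instead of waiting; otherwise it
# behaves like SubmitOpCode and returns the opcode result.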
1789

    
1790

    
1791
def SetGenericOpcodeOpts(opcode_list, options):
1792
  """Processor for generic options.
1793

1794
  This function updates the given opcodes based on generic command
1795
  line options (like debug, dry-run, etc.).
1796

1797
  @param opcode_list: list of opcodes
1798
  @param options: command line options or None
1799
  @return: None (in-place modification)
1800

1801
  """
1802
  if not options:
1803
    return
1804
  for op in opcode_list:
1805
    op.debug_level = options.debug
1806
    if hasattr(options, "dry_run"):
1807
      op.dry_run = options.dry_run
1808
    if getattr(options, "priority", None) is not None:
1809
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1810

    
1811

    
1812
def GetClient():
1813
  # TODO: Cache object?
1814
  try:
1815
    client = luxi.Client()
1816
  except luxi.NoMasterError:
1817
    ss = ssconf.SimpleStore()
1818

    
1819
    # Try to read ssconf file
1820
    try:
1821
      ss.GetMasterNode()
1822
    except errors.ConfigurationError:
1823
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1824
                                 " not part of a cluster")
1825

    
1826
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1827
    if master != myself:
1828
      raise errors.OpPrereqError("This is not the master node, please connect"
1829
                                 " to node '%s' and rerun the command" %
1830
                                 master)
1831
    raise
1832
  return client
1833

    
1834

    
1835
def FormatError(err):
1836
  """Return a formatted error message for a given error.
1837

1838
  This function takes an exception instance and returns a tuple
1839
  consisting of two values: first, the recommended exit code, and
1840
  second, a string describing the error message (not
1841
  newline-terminated).
1842

1843
  """
1844
  retcode = 1
1845
  obuf = StringIO()
1846
  msg = str(err)
1847
  if isinstance(err, errors.ConfigurationError):
1848
    txt = "Corrupt configuration file: %s" % msg
1849
    logging.error(txt)
1850
    obuf.write(txt + "\n")
1851
    obuf.write("Aborting.")
1852
    retcode = 2
1853
  elif isinstance(err, errors.HooksAbort):
1854
    obuf.write("Failure: hooks execution failed:\n")
1855
    for node, script, out in err.args[0]:
1856
      if out:
1857
        obuf.write("  node: %s, script: %s, output: %s\n" %
1858
                   (node, script, out))
1859
      else:
1860
        obuf.write("  node: %s, script: %s (no output)\n" %
1861
                   (node, script))
1862
  elif isinstance(err, errors.HooksFailure):
1863
    obuf.write("Failure: hooks general failure: %s" % msg)
1864
  elif isinstance(err, errors.ResolverError):
1865
    this_host = netutils.Hostname.GetSysName()
1866
    if err.args[0] == this_host:
1867
      msg = "Failure: can't resolve my own hostname ('%s')"
1868
    else:
1869
      msg = "Failure: can't resolve hostname '%s'"
1870
    obuf.write(msg % err.args[0])
1871
  elif isinstance(err, errors.OpPrereqError):
1872
    if len(err.args) == 2:
1873
      obuf.write("Failure: prerequisites not met for this"
1874
               " operation:\nerror type: %s, error details:\n%s" %
1875
                 (err.args[1], err.args[0]))
1876
    else:
1877
      obuf.write("Failure: prerequisites not met for this"
1878
                 " operation:\n%s" % msg)
1879
  elif isinstance(err, errors.OpExecError):
1880
    obuf.write("Failure: command execution error:\n%s" % msg)
1881
  elif isinstance(err, errors.TagError):
1882
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1883
  elif isinstance(err, errors.JobQueueDrainError):
1884
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1885
               " accept new requests\n")
1886
  elif isinstance(err, errors.JobQueueFull):
1887
    obuf.write("Failure: the job queue is full and doesn't accept new"
1888
               " job submissions until old jobs are archived\n")
1889
  elif isinstance(err, errors.TypeEnforcementError):
1890
    obuf.write("Parameter Error: %s" % msg)
1891
  elif isinstance(err, errors.ParameterError):
1892
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1893
  elif isinstance(err, luxi.NoMasterError):
1894
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1895
               " and listening for connections?")
1896
  elif isinstance(err, luxi.TimeoutError):
1897
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1898
               " been submitted and will continue to run even if the call"
1899
               " timed out. Useful commands in this situation are \"gnt-job"
1900
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1901
    obuf.write(msg)
1902
  elif isinstance(err, luxi.PermissionError):
1903
    obuf.write("It seems you don't have permissions to connect to the"
1904
               " master daemon.\nPlease retry as a different user.")
1905
  elif isinstance(err, luxi.ProtocolError):
1906
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1907
               "%s" % msg)
1908
  elif isinstance(err, errors.JobLost):
1909
    obuf.write("Error checking job status: %s" % msg)
1910
  elif isinstance(err, errors.QueryFilterParseError):
1911
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
1912
    obuf.write("\n".join(err.GetDetails()))
1913
  elif isinstance(err, errors.GenericError):
1914
    obuf.write("Unhandled Ganeti error: %s" % msg)
1915
  elif isinstance(err, JobSubmittedException):
1916
    obuf.write("JobID: %s\n" % err.args[0])
1917
    retcode = 0
1918
  else:
1919
    obuf.write("Unhandled exception: %s" % msg)
1920
  return retcode, obuf.getvalue().rstrip('\n')
1921

    
1922

    
1923
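# Illustrative sketch (added for clarity): the "commands" dictionary expected
# by GenericMain maps each command name to a 5-tuple, as unpacked in
# _ParseArgs; ListFn, SomeArg and SOME_OPT below are hypothetical stand-ins
# for a handler function, an argument definition and an option.
#   commands = {
#     "list": (ListFn, [SomeArg()], [SOME_OPT], "[<name>...]",
#              "Lists the items"),
#     }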
def GenericMain(commands, override=None, aliases=None):
1924
  """Generic main function for all the gnt-* commands.
1925

1926
  Arguments:
1927
    - commands: a dictionary with a special structure, see the design doc
1928
                for command line handling.
1929
    - override: if not None, we expect a dictionary with keys that will
1930
                override command line options; this can be used to pass
1931
                options from the scripts to generic functions
1932
    - aliases: dictionary with command aliases {'alias': 'target', ...}
1933

1934
  """
1935
  # save the program name and the entire command line for later logging
1936
  if sys.argv:
1937
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1938
    if len(sys.argv) >= 2:
1939
      binary += " " + sys.argv[1]
1940
      old_cmdline = " ".join(sys.argv[2:])
1941
    else:
1942
      old_cmdline = ""
1943
  else:
1944
    binary = "<unknown program>"
1945
    old_cmdline = ""
1946

    
1947
  if aliases is None:
1948
    aliases = {}
1949

    
1950
  try:
1951
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
1952
  except errors.ParameterError, err:
1953
    result, err_msg = FormatError(err)
1954
    ToStderr(err_msg)
1955
    return 1
1956

    
1957
  if func is None: # parse error
1958
    return 1
1959

    
1960
  if override is not None:
1961
    for key, val in override.iteritems():
1962
      setattr(options, key, val)
1963

    
1964
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
1965
                     stderr_logging=True)
1966

    
1967
  if old_cmdline:
1968
    logging.info("run with arguments '%s'", old_cmdline)
1969
  else:
1970
    logging.info("run with no arguments")
1971

    
1972
  try:
1973
    result = func(options, args)
1974
  except (errors.GenericError, luxi.ProtocolError,
1975
          JobSubmittedException), err:
1976
    result, err_msg = FormatError(err)
1977
    logging.exception("Error during command processing")
1978
    ToStderr(err_msg)
1979
  except KeyboardInterrupt:
1980
    result = constants.EXIT_FAILURE
1981
    ToStderr("Aborted. Note that if the operation created any jobs, they"
1982
             " might have been submitted and"
1983
             " will continue to run in the background.")
1984

    
1985
  return result
1986

    
1987

    
1988
def ParseNicOption(optvalue):
1989
  """Parses the value of the --net option(s).
1990

1991
  """
1992
  try:
1993
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1994
  except (TypeError, ValueError), err:
1995
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1996

    
1997
  nics = [{}] * nic_max
1998
  for nidx, ndict in optvalue:
1999
    nidx = int(nidx)
2000

    
2001
    if not isinstance(ndict, dict):
2002
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2003
                                 " got %s" % (nidx, ndict))
2004

    
2005
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2006

    
2007
    nics[nidx] = ndict
2008

    
2009
  return nics
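# Illustrative example (added for clarity; parameter names are illustrative
# and must match constants.INIC_PARAMS_TYPES): two --net options passed as
# (index, dict) pairs.
#   ParseNicOption([("0", {"link": "br0"}), ("1", {"mode": "routed"})])
#     ->  [{"link": "br0"}, {"mode": "routed"}]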
2010

    
2011

    
2012
def GenericInstanceCreate(mode, opts, args):
2013
  """Add an instance to the cluster via either creation or import.
2014

2015
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2016
  @param opts: the command line options selected by the user
2017
  @type args: list
2018
  @param args: should contain only one element, the new instance name
2019
  @rtype: int
2020
  @return: the desired exit code
2021

2022
  """
2023
  instance = args[0]
2024

    
2025
  (pnode, snode) = SplitNodeOption(opts.node)
2026

    
2027
  hypervisor = None
2028
  hvparams = {}
2029
  if opts.hypervisor:
2030
    hypervisor, hvparams = opts.hypervisor
2031

    
2032
  if opts.nics:
2033
    nics = ParseNicOption(opts.nics)
2034
  elif opts.no_nics:
2035
    # no nics
2036
    nics = []
2037
  elif mode == constants.INSTANCE_CREATE:
2038
    # default of one nic, all auto
2039
    nics = [{}]
2040
  else:
2041
    # mode == import
2042
    nics = []
2043

    
2044
  if opts.disk_template == constants.DT_DISKLESS:
2045
    if opts.disks or opts.sd_size is not None:
2046
      raise errors.OpPrereqError("Diskless instance but disk"
2047
                                 " information passed")
2048
    disks = []
2049
  else:
2050
    if (not opts.disks and not opts.sd_size
2051
        and mode == constants.INSTANCE_CREATE):
2052
      raise errors.OpPrereqError("No disk information specified")
2053
    if opts.disks and opts.sd_size is not None:
2054
      raise errors.OpPrereqError("Please use either the '--disk' or"
2055
                                 " '-s' option")
2056
    if opts.sd_size is not None:
2057
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2058

    
2059
    if opts.disks:
2060
      try:
2061
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2062
      except ValueError, err:
2063
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2064
      disks = [{}] * disk_max
2065
    else:
2066
      disks = []
2067
    for didx, ddict in opts.disks:
2068
      didx = int(didx)
2069
      if not isinstance(ddict, dict):
2070
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2071
        raise errors.OpPrereqError(msg)
2072
      elif constants.IDISK_SIZE in ddict:
2073
        if constants.IDISK_ADOPT in ddict:
2074
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2075
                                     " (disk %d)" % didx)
2076
        try:
2077
          ddict[constants.IDISK_SIZE] = \
2078
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2079
        except ValueError, err:
2080
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2081
                                     (didx, err))
2082
      elif constants.IDISK_ADOPT in ddict:
2083
        if mode == constants.INSTANCE_IMPORT:
2084
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2085
                                     " import")
2086
        ddict[constants.IDISK_SIZE] = 0
2087
      else:
2088
        raise errors.OpPrereqError("Missing size or adoption source for"
2089
                                   " disk %d" % didx)
2090
      disks[didx] = ddict
2091

    
2092
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2093
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2094

    
2095
  if mode == constants.INSTANCE_CREATE:
2096
    start = opts.start
2097
    os_type = opts.os
2098
    force_variant = opts.force_variant
2099
    src_node = None
2100
    src_path = None
2101
    no_install = opts.no_install
2102
    identify_defaults = False
2103
  elif mode == constants.INSTANCE_IMPORT:
2104
    start = False
2105
    os_type = None
2106
    force_variant = False
2107
    src_node = opts.src_node
2108
    src_path = opts.src_dir
2109
    no_install = None
2110
    identify_defaults = opts.identify_defaults
2111
  else:
2112
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2113

    
2114
  op = opcodes.OpInstanceCreate(instance_name=instance,
2115
                                disks=disks,
2116
                                disk_template=opts.disk_template,
2117
                                nics=nics,
2118
                                pnode=pnode, snode=snode,
2119
                                ip_check=opts.ip_check,
2120
                                name_check=opts.name_check,
2121
                                wait_for_sync=opts.wait_for_sync,
2122
                                file_storage_dir=opts.file_storage_dir,
2123
                                file_driver=opts.file_driver,
2124
                                iallocator=opts.iallocator,
2125
                                hypervisor=hypervisor,
2126
                                hvparams=hvparams,
2127
                                beparams=opts.beparams,
2128
                                osparams=opts.osparams,
2129
                                mode=mode,
2130
                                start=start,
2131
                                os_type=os_type,
2132
                                force_variant=force_variant,
2133
                                src_node=src_node,
2134
                                src_path=src_path,
2135
                                no_install=no_install,
2136
                                identify_defaults=identify_defaults)
2137

    
2138
  SubmitOrSend(op, opts)
2139
  return 0
2140

    
2141

    
2142
class _RunWhileClusterStoppedHelper:
2143
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2144

2145
  """
2146
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2147
    """Initializes this class.
2148

2149
    @type feedback_fn: callable
2150
    @param feedback_fn: Feedback function
2151
    @type cluster_name: string
2152
    @param cluster_name: Cluster name
2153
    @type master_node: string
2154
    @param master_node: Master node name
2155
    @type online_nodes: list
2156
    @param online_nodes: List of names of online nodes
2157

2158
    """
2159
    self.feedback_fn = feedback_fn
2160
    self.cluster_name = cluster_name
2161
    self.master_node = master_node
2162
    self.online_nodes = online_nodes
2163

    
2164
    self.ssh = ssh.SshRunner(self.cluster_name)
2165

    
2166
    self.nonmaster_nodes = [name for name in online_nodes
2167
                            if name != master_node]
2168

    
2169
    assert self.master_node not in self.nonmaster_nodes
2170

    
2171
  def _RunCmd(self, node_name, cmd):
2172
    """Runs a command on the local or a remote machine.
2173

2174
    @type node_name: string
2175
    @param node_name: Machine name
2176
    @type cmd: list
2177
    @param cmd: Command
2178

2179
    """
2180
    if node_name is None or node_name == self.master_node:
2181
      # No need to use SSH
2182
      result = utils.RunCmd(cmd)
2183
    else:
2184
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2185

    
2186
    if result.failed:
2187
      errmsg = ["Failed to run command %s" % result.cmd]
2188
      if node_name:
2189
        errmsg.append("on node %s" % node_name)
2190
      errmsg.append(": exitcode %s and error %s" %
2191
                    (result.exit_code, result.output))
2192
      raise errors.OpExecError(" ".join(errmsg))
2193

    
2194
  def Call(self, fn, *args):
2195
    """Call function while all daemons are stopped.
2196

2197
    @type fn: callable
2198
    @param fn: Function to be called
2199

2200
    """
2201
    # Pause watcher by acquiring an exclusive lock on watcher state file
2202
    self.feedback_fn("Blocking watcher")
2203
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2204
    try:
2205
      # TODO: Currently, this just blocks. There's no timeout.
2206
      # TODO: Should it be a shared lock?
2207
      watcher_block.Exclusive(blocking=True)
2208

    
2209
      # Stop master daemons, so that no new jobs can come in and all running
2210
      # ones are finished
2211
      self.feedback_fn("Stopping master daemons")
2212
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2213
      try:
2214
        # Stop daemons on all nodes
2215
        for node_name in self.online_nodes:
2216
          self.feedback_fn("Stopping daemons on %s" % node_name)
2217
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2218

    
2219
        # All daemons are shut down now
2220
        try:
2221
          return fn(self, *args)
2222
        except Exception, err:
2223
          _, errmsg = FormatError(err)
2224
          logging.exception("Caught exception")
2225
          self.feedback_fn(errmsg)
2226
          raise
2227
      finally:
2228
        # Start cluster again, master node last
2229
        for node_name in self.nonmaster_nodes + [self.master_node]:
2230
          self.feedback_fn("Starting daemons on %s" % node_name)
2231
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2232
    finally:
2233
      # Resume watcher
2234
      watcher_block.Close()
2235

    
2236

    
2237
def RunWhileClusterStopped(feedback_fn, fn, *args):
2238
  """Calls a function while all cluster daemons are stopped.
2239

2240
  @type feedback_fn: callable
2241
  @param feedback_fn: Feedback function
2242
  @type fn: callable
2243
  @param fn: Function to be called when daemons are stopped
2244

2245
  """
2246
  feedback_fn("Gathering cluster information")
2247

    
2248
  # This ensures we're running on the master daemon
2249
  cl = GetClient()
2250

    
2251
  (cluster_name, master_node) = \
2252
    cl.QueryConfigValues(["cluster_name", "master_node"])
2253

    
2254
  online_nodes = GetOnlineNodes([], cl=cl)
2255

    
2256
  # Don't keep a reference to the client. The master daemon will go away.
2257
  del cl
2258

    
2259
  assert master_node in online_nodes
2260

    
2261
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2262
                                       online_nodes).Call(fn, *args)
2263

    
2264

    
2265
def GenerateTable(headers, fields, separator, data,
2266
                  numfields=None, unitfields=None,
2267
                  units=None):
2268
  """Prints a table with headers and different fields.
2269

2270
  @type headers: dict
2271
  @param headers: dictionary mapping field names to headers for
2272
      the table
2273
  @type fields: list
2274
  @param fields: the field names corresponding to each row in
2275
      the data field
2276
  @param separator: the separator to be used; if this is None,
2277
      the default 'smart' algorithm is used which computes optimal
2278
      field width, otherwise just the separator is used between
2279
      each field
2280
  @type data: list
2281
  @param data: a list of lists, each sublist being one row to be output
2282
  @type numfields: list
2283
  @param numfields: a list with the fields that hold numeric
2284
      values and thus should be right-aligned
2285
  @type unitfields: list
2286
  @param unitfields: a list with the fields that hold numeric
2287
      values that should be formatted with the units field
2288
  @type units: string or None
2289
  @param units: the units we should use for formatting, or None for
2290
      automatic choice (human-readable for non-separator usage, otherwise
2291
      megabytes); this is a one-letter string
2292

2293
  """
2294
  if units is None:
2295
    if separator:
2296
      units = "m"
2297
    else:
2298
      units = "h"
2299

    
2300
  if numfields is None:
2301
    numfields = []
2302
  if unitfields is None:
2303
    unitfields = []
2304

    
2305
  numfields = utils.FieldSet(*numfields)   # pylint: disable-msg=W0142
2306
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142
2307

    
2308
  format_fields = []
2309
  for field in fields:
2310
    if headers and field not in headers:
2311
      # TODO: handle better unknown fields (either revert to old
2312
      # style of raising exception, or deal more intelligently with
2313
      # variable fields)
2314
      headers[field] = field
2315
    if separator is not None:
2316
      format_fields.append("%s")
2317
    elif numfields.Matches(field):
2318
      format_fields.append("%*s")
2319
    else:
2320
      format_fields.append("%-*s")
2321

    
2322
  if separator is None:
2323
    mlens = [0 for name in fields]
2324
    format_str = ' '.join(format_fields)
2325
  else:
2326
    format_str = separator.replace("%", "%%").join(format_fields)
2327

    
2328
  for row in data:
2329
    if row is None:
2330
      continue
2331
    for idx, val in enumerate(row):
2332
      if unitfields.Matches(fields[idx]):
2333
        try:
2334
          val = int(val)
2335
        except (TypeError, ValueError):
2336
          pass
2337
        else:
2338
          val = row[idx] = utils.FormatUnit(val, units)
2339
      val = row[idx] = str(val)
2340
      if separator is None:
2341
        mlens[idx] = max(mlens[idx], len(val))
2342

    
2343
  result = []
2344
  if headers:
2345
    args = []
2346
    for idx, name in enumerate(fields):
2347
      hdr = headers[name]
2348
      if separator is None:
2349
        mlens[idx] = max(mlens[idx], len(hdr))
2350
        args.append(mlens[idx])
2351
      args.append(hdr)
2352
    result.append(format_str % tuple(args))
2353

    
2354
  if separator is None:
2355
    assert len(mlens) == len(fields)
2356

    
2357
    if fields and not numfields.Matches(fields[-1]):
2358
      mlens[-1] = 0
2359

    
2360
  for line in data:
2361
    args = []
2362
    if line is None:
2363
      line = ['-' for _ in fields]
2364
    for idx in range(len(fields)):
2365
      if separator is None:
2366
        args.append(mlens[idx])
2367
      args.append(line[idx])
2368
    result.append(format_str % tuple(args))
2369

    
2370
  return result
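# Illustrative sketch (added for clarity; field and node names are made up):
# a two-column table with a numeric, unit-formatted field.
#   lines = GenerateTable(headers={"name": "Node", "dfree": "DiskFree"},
#                         fields=["name", "dfree"],
#                         separator=None,
#                         data=[["node1", 1024], ["node2", 2048]],
#                         numfields=["dfree"], unitfields=["dfree"])
#   # each entry in "lines" is one formatted row, ready for ToStdout()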
2371

    
2372

    
2373
def _FormatBool(value):
2374
  """Formats a boolean value as a string.
2375

2376
  """
2377
  if value:
2378
    return "Y"
2379
  return "N"
2380

    
2381

    
2382
#: Default formatting for query results; (callback, align right)
2383
_DEFAULT_FORMAT_QUERY = {
2384
  constants.QFT_TEXT: (str, False),
2385
  constants.QFT_BOOL: (_FormatBool, False),
2386
  constants.QFT_NUMBER: (str, True),
2387
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2388
  constants.QFT_OTHER: (str, False),
2389
  constants.QFT_UNKNOWN: (str, False),
2390
  }
2391

    
2392

    
2393
def _GetColumnFormatter(fdef, override, unit):
2394
  """Returns formatting function for a field.
2395

2396
  @type fdef: L{objects.QueryFieldDefinition}
2397
  @type override: dict
2398
  @param override: Dictionary for overriding field formatting functions,
2399
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2400
  @type unit: string
2401
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2402
  @rtype: tuple; (callable, bool)
2403
  @return: Returns the function to format a value (takes one parameter) and a
2404
    boolean for aligning the value on the right-hand side
2405

2406
  """
2407
  fmt = override.get(fdef.name, None)
2408
  if fmt is not None:
2409
    return fmt
2410

    
2411
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2412

    
2413
  if fdef.kind == constants.QFT_UNIT:
2414
    # Can't keep this information in the static dictionary
2415
    return (lambda value: utils.FormatUnit(value, unit), True)
2416

    
2417
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2418
  if fmt is not None:
2419
    return fmt
2420

    
2421
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2422

    
2423

    
2424
class _QueryColumnFormatter:
2425
  """Callable class for formatting fields of a query.
2426

2427
  """
2428
  def __init__(self, fn, status_fn, verbose):
2429
    """Initializes this class.
2430

2431
    @type fn: callable
2432
    @param fn: Formatting function
2433
    @type status_fn: callable
2434
    @param status_fn: Function to report fields' status
2435
    @type verbose: boolean
2436
    @param verbose: whether to use verbose field descriptions or not
2437

2438
    """
2439
    self._fn = fn
2440
    self._status_fn = status_fn
2441
    self._verbose = verbose
2442

    
2443
  def __call__(self, data):
2444
    """Returns a field's string representation.
2445

2446
    """
2447
    (status, value) = data
2448

    
2449
    # Report status
2450
    self._status_fn(status)
2451

    
2452
    if status == constants.RS_NORMAL:
2453
      return self._fn(value)
2454

    
2455
    assert value is None, \
2456
           "Found value %r for abnormal status %s" % (value, status)
2457

    
2458
    return FormatResultError(status, self._verbose)
2459

    
2460

    
2461
def FormatResultError(status, verbose):
2462
  """Formats result status other than L{constants.RS_NORMAL}.
2463

2464
  @param status: The result status
2465
  @type verbose: boolean
2466
  @param verbose: Whether to return the verbose text
2467
  @return: Text of result status
2468

2469
  """
2470
  assert status != constants.RS_NORMAL, \
2471
         "FormatResultError called with status equal to constants.RS_NORMAL"
2472
  try:
2473
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2474
  except KeyError:
2475
    raise NotImplementedError("Unknown status %s" % status)
2476
  else:
2477
    if verbose:
2478
      return verbose_text
2479
    return normal_text
2480

    
2481

    
2482
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2483
                      header=False, verbose=False):
2484
  """Formats data in L{objects.QueryResponse}.
2485

2486
  @type result: L{objects.QueryResponse}
2487
  @param result: result of query operation
2488
  @type unit: string
2489
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2490
    see L{utils.text.FormatUnit}
2491
  @type format_override: dict
2492
  @param format_override: Dictionary for overriding field formatting functions,
2493
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2494
  @type separator: string or None
2495
  @param separator: String used to separate fields
2496
  @type header: bool
2497
  @param header: Whether to output header row
2498
  @type verbose: boolean
2499
  @param verbose: whether to use verbose field descriptions or not
2500

2501
  """
2502
  if unit is None:
2503
    if separator:
2504
      unit = "m"
2505
    else:
2506
      unit = "h"
2507

    
2508
  if format_override is None:
2509
    format_override = {}
2510

    
2511
  stats = dict.fromkeys(constants.RS_ALL, 0)
2512

    
2513
  def _RecordStatus(status):
2514
    if status in stats:
2515
      stats[status] += 1
2516

    
2517
  columns = []
2518
  for fdef in result.fields:
2519
    assert fdef.title and fdef.name
2520
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2521
    columns.append(TableColumn(fdef.title,
2522
                               _QueryColumnFormatter(fn, _RecordStatus,
2523
                                                     verbose),
2524
                               align_right))
2525

    
2526
  table = FormatTable(result.data, columns, header, separator)
2527

    
2528
  # Collect statistics
2529
  assert len(stats) == len(constants.RS_ALL)
2530
  assert compat.all(count >= 0 for count in stats.values())
2531

    
2532
  # Determine overall status. If there was no data, unknown fields must be
2533
  # detected via the field definitions.
2534
  if (stats[constants.RS_UNKNOWN] or
2535
      (not result.data and _GetUnknownFields(result.fields))):
2536
    status = QR_UNKNOWN
2537
  elif compat.any(count > 0 for key, count in stats.items()
2538
                  if key != constants.RS_NORMAL):
2539
    status = QR_INCOMPLETE
2540
  else:
2541
    status = QR_NORMAL
2542

    
2543
  return (status, table)
2544

    
2545

    
2546
def _GetUnknownFields(fdefs):
2547
  """Returns list of unknown fields included in C{fdefs}.
2548

2549
  @type fdefs: list of L{objects.QueryFieldDefinition}
2550

2551
  """
2552
  return [fdef for fdef in fdefs
2553
          if fdef.kind == constants.QFT_UNKNOWN]
2554

    
2555

    
2556
def _WarnUnknownFields(fdefs):
2557
  """Prints a warning to stderr if a query included unknown fields.
2558

2559
  @type fdefs: list of L{objects.QueryFieldDefinition}
2560

2561
  """
2562
  unknown = _GetUnknownFields(fdefs)
2563
  if unknown:
2564
    ToStderr("Warning: Queried for unknown fields %s",
2565
             utils.CommaJoin(fdef.name for fdef in unknown))
2566
    return True
2567

    
2568
  return False
2569

    
2570

    
2571
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2572
                format_override=None, verbose=False, force_filter=False):
2573
  """Generic implementation for listing all items of a resource.
2574

2575
  @param resource: One of L{constants.QR_VIA_LUXI}
2576
  @type fields: list of strings
2577
  @param fields: List of fields to query for
2578
  @type names: list of strings
2579
  @param names: Names of items to query for
2580
  @type unit: string or None
2581
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2582
    None for automatic choice (human-readable for non-separator usage,
2583
    otherwise megabytes); this is a one-letter string
2584
  @type separator: string or None
2585
  @param separator: String used to separate fields
2586
  @type header: bool
2587
  @param header: Whether to show header row
2588
  @type force_filter: bool
2589
  @param force_filter: Whether to always treat names as filter
2590
  @type format_override: dict
2591
  @param format_override: Dictionary for overriding field formatting functions,
2592
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2593
  @type verbose: boolean
2594
  @param verbose: whether to use verbose field descriptions or not
2595

2596
  """
2597
  if cl is None:
2598
    cl = GetClient()
2599

    
2600
  if not names:
2601
    names = None
2602

    
2603
  if (force_filter or
2604
      (names and len(names) == 1 and qlang.MaybeFilter(names[0]))):
2605
    try:
2606
      (filter_text, ) = names
2607
    except ValueError:
2608
      raise errors.OpPrereqError("Exactly one argument must be given as a"
2609
                                 " filter")
2610

    
2611
    logging.debug("Parsing '%s' as filter", filter_text)
2612
    filter_ = qlang.ParseFilter(filter_text)
2613
  else:
2614
    filter_ = qlang.MakeSimpleFilter("name", names)
2615

    
2616
  response = cl.Query(resource, fields, filter_)
2617

    
2618
  found_unknown = _WarnUnknownFields(response.fields)
2619

    
2620
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2621
                                     header=header,
2622
                                     format_override=format_override,
2623
                                     verbose=verbose)
2624

    
2625
  for line in data:
2626
    ToStdout(line)
2627

    
2628
  assert ((found_unknown and status == QR_UNKNOWN) or
2629
          (not found_unknown and status != QR_UNKNOWN))
2630

    
2631
  if status == QR_UNKNOWN:
2632
    return constants.EXIT_UNKNOWN_FIELD
2633

    
2634
  # TODO: Should the list command fail if not all data could be collected?
2635
  return constants.EXIT_SUCCESS
2636

    
2637

    
2638
def GenericListFields(resource, fields, separator, header, cl=None):
2639
  """Generic implementation for listing fields for a resource.
2640

2641
  @param resource: One of L{constants.QR_VIA_LUXI}
2642
  @type fields: list of strings
2643
  @param fields: List of fields to query for
2644
  @type separator: string or None
2645
  @param separator: String used to separate fields
2646
  @type header: bool
2647
  @param header: Whether to show header row
2648

2649
  """
2650
  if cl is None:
2651
    cl = GetClient()
2652

    
2653
  if not fields:
2654
    fields = None
2655

    
2656
  response = cl.QueryFields(resource, fields)
2657

    
2658
  found_unknown = _WarnUnknownFields(response.fields)
2659

    
2660
  columns = [
2661
    TableColumn("Name", str, False),
2662
    TableColumn("Title", str, False),
2663
    TableColumn("Description", str, False),
2664
    ]
2665

    
2666
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2667

    
2668
  for line in FormatTable(rows, columns, header, separator):
2669
    ToStdout(line)
2670

    
2671
  if found_unknown:
2672
    return constants.EXIT_UNKNOWN_FIELD
2673

    
2674
  return constants.EXIT_SUCCESS
2675

    
2676

    
2677
class TableColumn:
2678
  """Describes a column for L{FormatTable}.
2679

2680
  """
2681
  def __init__(self, title, fn, align_right):
2682
    """Initializes this class.
2683

2684
    @type title: string
2685
    @param title: Column title
2686
    @type fn: callable
2687
    @param fn: Formatting function
2688
    @type align_right: bool
2689
    @param align_right: Whether to align values on the right-hand side
2690

2691
    """
2692
    self.title = title
2693
    self.format = fn
2694
    self.align_right = align_right
2695

    
2696

    
2697
def _GetColFormatString(width, align_right):
2698
  """Returns the format string for a field.
2699

2700
  """
2701
  if align_right:
2702
    sign = ""
2703
  else:
2704
    sign = "-"
2705

    
2706
  return "%%%s%ss" % (sign, width)
2707

    
2708

    
2709
def FormatTable(rows, columns, header, separator):
2710
  """Formats data as a table.
2711

2712
  @type rows: list of lists
2713
  @param rows: Row data, one list per row
2714
  @type columns: list of L{TableColumn}
2715
  @param columns: Column descriptions
2716
  @type header: bool
2717
  @param header: Whether to show header row
2718
  @type separator: string or None
2719
  @param separator: String used to separate columns
2720

2721
  """
2722
  if header:
2723
    data = [[col.title for col in columns]]
2724
    colwidth = [len(col.title) for col in columns]
2725
  else:
2726
    data = []
2727
    colwidth = [0 for _ in columns]
2728

    
2729
  # Format row data
2730
  for row in rows:
2731
    assert len(row) == len(columns)
2732

    
2733
    formatted = [col.format(value) for value, col in zip(row, columns)]
2734

    
2735
    if separator is None:
2736
      # Update column widths
2737
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2738
        # Modifying a list's items while iterating is fine
2739
        colwidth[idx] = max(oldwidth, len(value))
2740

    
2741
    data.append(formatted)
2742

    
2743
  if separator is not None:
2744
    # Return early if a separator is used
2745
    return [separator.join(row) for row in data]
2746

    
2747
  if columns and not columns[-1].align_right:
2748
    # Avoid unnecessary spaces at end of line
2749
    colwidth[-1] = 0
2750

    
2751
  # Build format string
2752
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2753
                  for col, width in zip(columns, colwidth)])
2754

    
2755
  return [fmt % tuple(row) for row in data]
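# Illustrative sketch (added for clarity; column titles and data are made up):
# formatting rows with TableColumn descriptions.
#   columns = [TableColumn("Name", str, False),
#              TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True)]
#   for line in FormatTable([["disk0", 512], ["disk1", 10240]],
#                           columns, True, None):
#     ToStdout(line)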
2756

    
2757

    
2758
def FormatTimestamp(ts):
2759
  """Formats a given timestamp.
2760

2761
  @type ts: timestamp
2762
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2763

2764
  @rtype: string
2765
  @return: a string with the formatted timestamp
2766

2767
  """
2768
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2769
    return '?'
2770
  sec, usec = ts
2771
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
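# Illustrative examples (added for clarity):
#   FormatTimestamp((1325376000, 123456))  ->  "2012-01-01 00:00:00.123456"
#     (the exact date/time part depends on the local timezone)
#   FormatTimestamp("bogus")  ->  "?"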
2772

    
2773

    
2774
def ParseTimespec(value):
2775
  """Parse a time specification.
2776

2777
  The following suffixes will be recognized:
2778

2779
    - s: seconds
2780
    - m: minutes
2781
    - h: hours
2782
    - d: days
2783
    - w: weeks
2784

2785
  Without any suffix, the value will be taken to be in seconds.
2786

2787
  """
2788
  value = str(value)
2789
  if not value:
2790
    raise errors.OpPrereqError("Empty time specification passed")
2791
  suffix_map = {
2792
    's': 1,
2793
    'm': 60,
2794
    'h': 3600,
2795
    'd': 86400,
2796
    'w': 604800,
2797
    }
2798
  if value[-1] not in suffix_map:
2799
    try:
2800
      value = int(value)
2801
    except (TypeError, ValueError):
2802
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2803
  else:
2804
    multiplier = suffix_map[value[-1]]
2805
    value = value[:-1]
2806
    if not value: # no data left after stripping the suffix
2807
      raise errors.OpPrereqError("Invalid time specification (only"
2808
                                 " suffix passed)")
2809
    try:
2810
      value = int(value) * multiplier
2811
    except (TypeError, ValueError):
2812
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2813
  return value
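# Illustrative examples (added for clarity):
#   ParseTimespec("30")  ->  30       (no suffix, taken as seconds)
#   ParseTimespec("2h")  ->  7200
#   ParseTimespec("1w")  ->  604800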
2814

    
2815

    
2816
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2817
                   filter_master=False):
2818
  """Returns the names of online nodes.
2819

2820
  This function will also log a warning on stderr with the names of
2821
  the offline nodes that are being skipped.
2822

2823
  @param nodes: if not empty, use only this subset of nodes (minus the
2824
      offline ones)
2825
  @param cl: if not None, luxi client to use
2826
  @type nowarn: boolean
2827
  @param nowarn: by default, this function will output a note with the
2828
      offline nodes that are skipped; if this parameter is True the
2829
      note is not displayed
2830
  @type secondary_ips: boolean
2831
  @param secondary_ips: if True, return the secondary IPs instead of the
2832
      names, useful for doing network traffic over the replication interface
2833
      (if any)
2834
  @type filter_master: boolean
2835
  @param filter_master: if True, do not return the master node in the list
2836
      (useful in coordination with secondary_ips where we cannot check our
2837
      node name against the list)
2838

2839
  """
2840
  if cl is None:
2841
    cl = GetClient()
2842

    
2843
  if secondary_ips:
2844
    name_idx = 2
2845
  else:
2846
    name_idx = 0
2847

    
2848
  if filter_master:
2849
    master_node = cl.QueryConfigValues(["master_node"])[0]
2850
    filter_fn = lambda x: x != master_node
2851
  else:
2852
    filter_fn = lambda _: True
2853

    
2854
  result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2855
                         use_locking=False)
2856
  offline = [row[0] for row in result if row[1]]
2857
  if offline and not nowarn:
2858
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2859
  return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2860

    
2861

    
2862
def _ToStream(stream, txt, *args):
2863
  """Write a message to a stream, bypassing the logging system
2864

2865
  @type stream: file object
2866
  @param stream: the file to which we should write
2867
  @type txt: str
2868
  @param txt: the message
2869

2870
  """
2871
  if args:
2872
    args = tuple(args)
2873
    stream.write(txt % args)
2874
  else:
2875
    stream.write(txt)
2876
  stream.write('\n')
2877
  stream.flush()
2878

    
2879

    
2880
def ToStdout(txt, *args):
2881
  """Write a message to stdout only, bypassing the logging system
2882

2883
  This is just a wrapper over _ToStream.
2884

2885
  @type txt: str
2886
  @param txt: the message
2887

2888
  """
2889
  _ToStream(sys.stdout, txt, *args)
2890

    
2891

    
2892
def ToStderr(txt, *args):
2893
  """Write a message to stderr only, bypassing the logging system
2894

2895
  This is just a wrapper over _ToStream.
2896

2897
  @type txt: str
2898
  @param txt: the message
2899

2900
  """
2901
  _ToStream(sys.stderr, txt, *args)
2902

    
2903

    
2904
class JobExecutor(object):
2905
  """Class which manages the submission and execution of multiple jobs.
2906

2907
  Note that instances of this class should not be reused between
2908
  GetResults() calls.
2909

2910
  """
2911
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2912
    self.queue = []
2913
    if cl is None:
2914
      cl = GetClient()
2915
    self.cl = cl
2916
    self.verbose = verbose
2917
    self.jobs = []
2918
    self.opts = opts
2919
    self.feedback_fn = feedback_fn
2920

    
2921
  def QueueJob(self, name, *ops):
2922
    """Record a job for later submit.
2923

2924
    @type name: string
2925
    @param name: a description of the job, used when reporting job results
2926
    """
2927
    SetGenericOpcodeOpts(ops, self.opts)
2928
    self.queue.append((name, ops))
2929

    
2930
  def SubmitPending(self, each=False):
2931
    """Submit all pending jobs.
2932

2933
    """
2934
    if each:
2935
      results = []
2936
      for row in self.queue:
2937
        # SubmitJob will remove the success status, but raise an exception if
2938
        # the submission fails, so we'll notice that anyway.
2939
        results.append([True, self.cl.SubmitJob(row[1])])
2940
    else:
2941
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2942
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2943
                                                            self.queue)):
2944
      self.jobs.append((idx, status, data, name))
2945

    
2946
  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


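# Usage sketch for JobExecutor above (hypothetical caller code):
#
#   je = JobExecutor(opts=opts)
#   for name, op in pending:      # (description, opcode) pairs built by caller
#     je.QueueJob(name, op)
#   for success, result in je.GetResults():
#     ...
#
# A fresh JobExecutor is needed per batch, as instances must not be reused
# after GetResults().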
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the parameters explicitly set for the object
      (without defaults)
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @type level: int
  @param level: level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))


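# Example for FormatParameterDict above (hypothetical values):
#
#   buf = StringIO()
#   FormatParameterDict(buf, {"mem": 512}, {"mem": 512, "vcpus": 1})
#
# would leave "  - mem: 512" and "  - vcpus: default (1)" in the buffer.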
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @type extra: str
  @param extra: additional text to show before the confirmation question
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
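

# Usage sketch for ConfirmOperation above (hypothetical caller code):
#
#   if not ConfirmOperation(names, "instances", "shutdown"):
#     return 1
#
# For up to 20 names the affected list is shown directly; above that, a "v"
# choice lets the user view it before answering.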