#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key: val, key: val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggest is normally a list. Numeric values not evaluating
# to False are used as markers for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance"
                            " MAC addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shut down the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                           default=None,
                           help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                           default=None,
                           help="File containing the certificate of the CA"
                                " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                               dest="new_spice_cert", default=None,
                               action="store_true",
                               help=("Generate a new self-signed SPICE"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names, that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as a filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# Common options for creating instances. The "add" and "import" commands then
# add their own specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]


def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
1395
  if args and not args_def:
1396
    ToStderr("Error: Command %s expects no arguments", cmd)
1397
    return False
1398

    
1399
  min_count = None
1400
  max_count = None
1401
  check_max = None
1402

    
1403
  last_idx = len(args_def) - 1
1404

    
1405
  for idx, arg in enumerate(args_def):
1406
    if min_count is None:
1407
      min_count = arg.min
1408
    elif arg.min is not None:
1409
      min_count += arg.min
1410

    
1411
    if max_count is None:
1412
      max_count = arg.max
1413
    elif arg.max is not None:
1414
      max_count += arg.max
1415

    
1416
    if idx == last_idx:
1417
      check_max = (arg.max is not None)
1418

    
1419
    elif arg.max is None:
1420
      raise errors.ProgrammerError("Only the last argument can have max=None")
1421

    
1422
  if check_max:
1423
    # Command with exact number of arguments
1424
    if (min_count is not None and max_count is not None and
1425
        min_count == max_count and len(args) != min_count):
1426
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1427
      return False
1428

    
1429
    # Command with limited number of arguments
1430
    if max_count is not None and len(args) > max_count:
1431
      ToStderr("Error: Command %s expects only %d argument(s)",
1432
               cmd, max_count)
1433
      return False
1434

    
1435
  # Command with some required arguments
1436
  if min_count is not None and len(args) < min_count:
1437
    ToStderr("Error: Command %s expects at least %d argument(s)",
1438
             cmd, min_count)
1439
    return False
1440

    
1441
  return True
1442

    
1443

    
1444
def SplitNodeOption(value):
1445
  """Splits the value of a --node option.
1446

1447
  """
1448
  if value and ":" in value:
1449
    return value.split(":", 1)
1450
  else:
1451
    return (value, None)
1452

    
1453

    
1454
def CalculateOSNames(os_name, os_variants):
1455
  """Calculates all the names an OS can be called, according to its variants.
1456

1457
  @type os_name: string
1458
  @param os_name: base name of the os
1459
  @type os_variants: list or None
1460
  @param os_variants: list of supported variants
1461
  @rtype: list
1462
  @return: list of valid names
1463

1464
  """
1465
  if os_variants:
1466
    return ["%s+%s" % (os_name, v) for v in os_variants]
1467
  else:
1468
    return [os_name]
1469

    
1470

    
1471
def ParseFields(selected, default):
1472
  """Parses the values of "--field"-like options.
1473

1474
  @type selected: string or None
1475
  @param selected: User-selected options
1476
  @type default: list
1477
  @param default: Default fields
1478

1479
  """
1480
  if selected is None:
1481
    return default
1482

    
1483
  if selected.startswith("+"):
1484
    return default + selected[1:].split(",")
1485

    
1486
  return selected.split(",")
1487

    
1488

    
1489
UsesRPC = rpc.RunWithRPC
1490

    
1491

    
1492
def AskUser(text, choices=None):
1493
  """Ask the user a question.
1494

1495
  @param text: the question to ask
1496

1497
  @param choices: list with elements tuples (input_char, return_value,
1498
      description); if not given, it will default to: [('y', True,
1499
      'Perform the operation'), ('n', False, 'Do no do the operation')];
1500
      note that the '?' char is reserved for help
1501

1502
  @return: one of the return values from the choices list; if input is
1503
      not possible (i.e. not running with a tty, we return the last
1504
      entry from the list
1505

1506
  """
1507
  if choices is None:
1508
    choices = [("y", True, "Perform the operation"),
1509
               ("n", False, "Do not perform the operation")]
1510
  if not choices or not isinstance(choices, list):
1511
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1512
  for entry in choices:
1513
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1514
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1515

    
1516
  answer = choices[-1][1]
1517
  new_text = []
1518
  for line in text.splitlines():
1519
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1520
  text = "\n".join(new_text)
1521
  try:
1522
    f = file("/dev/tty", "a+")
1523
  except IOError:
1524
    return answer
1525
  try:
1526
    chars = [entry[0] for entry in choices]
1527
    chars[-1] = "[%s]" % chars[-1]
1528
    chars.append("?")
1529
    maps = dict([(entry[0], entry[1]) for entry in choices])
1530
    while True:
1531
      f.write(text)
1532
      f.write("\n")
1533
      f.write("/".join(chars))
1534
      f.write(": ")
1535
      line = f.readline(2).strip().lower()
1536
      if line in maps:
1537
        answer = maps[line]
1538
        break
1539
      elif line == "?":
1540
        for entry in choices:
1541
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1542
        f.write("\n")
1543
        continue
1544
  finally:
1545
    f.close()
1546
  return answer
1547

    
1548

    
1549
class JobSubmittedException(Exception):
1550
  """Job was submitted, client should exit.
1551

1552
  This exception has one argument, the ID of the job that was
1553
  submitted. The handler should print this ID.
1554

1555
  This is not an error, just a structured way to exit from clients.
1556

1557
  """
1558

    
1559

    
1560
def SendJob(ops, cl=None):
1561
  """Function to submit an opcode without waiting for the results.
1562

1563
  @type ops: list
1564
  @param ops: list of opcodes
1565
  @type cl: luxi.Client
1566
  @param cl: the luxi client to use for communicating with the master;
1567
             if None, a new client will be created
1568

1569
  """
1570
  if cl is None:
1571
    cl = GetClient()
1572

    
1573
  job_id = cl.SubmitJob(ops)
1574

    
1575
  return job_id
1576

    
1577

    
1578
def GenericPollJob(job_id, cbs, report_cbs):
1579
  """Generic job-polling function.
1580

1581
  @type job_id: number
1582
  @param job_id: Job ID
1583
  @type cbs: Instance of L{JobPollCbBase}
1584
  @param cbs: Data callbacks
1585
  @type report_cbs: Instance of L{JobPollReportCbBase}
1586
  @param report_cbs: Reporting callbacks
1587

1588
  """
1589
  prev_job_info = None
1590
  prev_logmsg_serial = None
1591

    
1592
  status = None
1593

    
1594
  while True:
1595
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1596
                                      prev_logmsg_serial)
1597
    if not result:
1598
      # job not found, go away!
1599
      raise errors.JobLost("Job with id %s lost" % job_id)
1600

    
1601
    if result == constants.JOB_NOTCHANGED:
1602
      report_cbs.ReportNotChanged(job_id, status)
1603

    
1604
      # Wait again
1605
      continue
1606

    
1607
    # Split result, a tuple of (field values, log entries)
1608
    (job_info, log_entries) = result
1609
    (status, ) = job_info
1610

    
1611
    if log_entries:
1612
      for log_entry in log_entries:
1613
        (serial, timestamp, log_type, message) = log_entry
1614
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1615
                                    log_type, message)
1616
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1617

    
1618
    # TODO: Handle canceled and archived jobs
1619
    elif status in (constants.JOB_STATUS_SUCCESS,
1620
                    constants.JOB_STATUS_ERROR,
1621
                    constants.JOB_STATUS_CANCELING,
1622
                    constants.JOB_STATUS_CANCELED):
1623
      break
1624

    
1625
    prev_job_info = job_info
1626

    
1627
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1628
  if not jobs:
1629
    raise errors.JobLost("Job with id %s lost" % job_id)
1630

    
1631
  status, opstatus, result = jobs[0]
1632

    
1633
  if status == constants.JOB_STATUS_SUCCESS:
1634
    return result
1635

    
1636
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1637
    raise errors.OpExecError("Job was canceled")
1638

    
1639
  has_ok = False
1640
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1641
    if status == constants.OP_STATUS_SUCCESS:
1642
      has_ok = True
1643
    elif status == constants.OP_STATUS_ERROR:
1644
      errors.MaybeRaise(msg)
1645

    
1646
      if has_ok:
1647
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1648
                                 (idx, msg))
1649

    
1650
      raise errors.OpExecError(str(msg))
1651

    
1652
  # default failure mode
1653
  raise errors.OpExecError(result)
1654

    
1655

    
1656
class JobPollCbBase:
1657
  """Base class for L{GenericPollJob} callbacks.
1658

1659
  """
1660
  def __init__(self):
1661
    """Initializes this class.
1662

1663
    """
1664

    
1665
  def WaitForJobChangeOnce(self, job_id, fields,
1666
                           prev_job_info, prev_log_serial):
1667
    """Waits for changes on a job.
1668

1669
    """
1670
    raise NotImplementedError()
1671

    
1672
  def QueryJobs(self, job_ids, fields):
1673
    """Returns the selected fields for the selected job IDs.
1674

1675
    @type job_ids: list of numbers
1676
    @param job_ids: Job IDs
1677
    @type fields: list of strings
1678
    @param fields: Fields
1679

1680
    """
1681
    raise NotImplementedError()
1682

    
1683

    
1684
class JobPollReportCbBase:
1685
  """Base class for L{GenericPollJob} reporting callbacks.
1686

1687
  """
1688
  def __init__(self):
1689
    """Initializes this class.
1690

1691
    """
1692

    
1693
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1694
    """Handles a log message.
1695

1696
    """
1697
    raise NotImplementedError()
1698

    
1699
  def ReportNotChanged(self, job_id, status):
1700
    """Called for if a job hasn't changed in a while.
1701

1702
    @type job_id: number
1703
    @param job_id: Job ID
1704
    @type status: string or None
1705
    @param status: Job status if available
1706

1707
    """
1708
    raise NotImplementedError()
1709

    
1710

    
1711
class _LuxiJobPollCb(JobPollCbBase):
1712
  def __init__(self, cl):
1713
    """Initializes this class.
1714

1715
    """
1716
    JobPollCbBase.__init__(self)
1717
    self.cl = cl
1718

    
1719
  def WaitForJobChangeOnce(self, job_id, fields,
1720
                           prev_job_info, prev_log_serial):
1721
    """Waits for changes on a job.
1722

1723
    """
1724
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1725
                                        prev_job_info, prev_log_serial)
1726

    
1727
  def QueryJobs(self, job_ids, fields):
1728
    """Returns the selected fields for the selected job IDs.
1729

1730
    """
1731
    return self.cl.QueryJobs(job_ids, fields)
1732

    
1733

    
1734
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1735
  def __init__(self, feedback_fn):
1736
    """Initializes this class.
1737

1738
    """
1739
    JobPollReportCbBase.__init__(self)
1740

    
1741
    self.feedback_fn = feedback_fn
1742

    
1743
    assert callable(feedback_fn)
1744

    
1745
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1746
    """Handles a log message.
1747

1748
    """
1749
    self.feedback_fn((timestamp, log_type, log_msg))
1750

    
1751
  def ReportNotChanged(self, job_id, status):
1752
    """Called if a job hasn't changed in a while.
1753

1754
    """
1755
    # Ignore
1756

    
1757

    
1758
class StdioJobPollReportCb(JobPollReportCbBase):
1759
  def __init__(self):
1760
    """Initializes this class.
1761

1762
    """
1763
    JobPollReportCbBase.__init__(self)
1764

    
1765
    self.notified_queued = False
1766
    self.notified_waitlock = False
1767

    
1768
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1769
    """Handles a log message.
1770

1771
    """
1772
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1773
             FormatLogMessage(log_type, log_msg))
1774

    
1775
  def ReportNotChanged(self, job_id, status):
1776
    """Called if a job hasn't changed in a while.
1777

1778
    """
1779
    if status is None:
1780
      return
1781

    
1782
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1783
      ToStderr("Job %s is waiting in queue", job_id)
1784
      self.notified_queued = True
1785

    
1786
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1787
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1788
      self.notified_waitlock = True
1789

    
1790

    
1791
def FormatLogMessage(log_type, log_msg):
1792
  """Formats a job message according to its type.
1793

1794
  """
1795
  if log_type != constants.ELOG_MESSAGE:
1796
    log_msg = str(log_msg)
1797

    
1798
  return utils.SafeEncode(log_msg)
1799

    
1800

    
1801
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1802
  """Function to poll for the result of a job.
1803

1804
  @type job_id: job identified
1805
  @param job_id: the job to poll for results
1806
  @type cl: luxi.Client
1807
  @param cl: the luxi client to use for communicating with the master;
1808
             if None, a new client will be created
1809

1810
  """
1811
  if cl is None:
1812
    cl = GetClient()
1813

    
1814
  if reporter is None:
1815
    if feedback_fn:
1816
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1817
    else:
1818
      reporter = StdioJobPollReportCb()
1819
  elif feedback_fn:
1820
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1821

    
1822
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1823

    
1824

    
1825
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1826
  """Legacy function to submit an opcode.
1827

1828
  This is just a simple wrapper over the construction of the processor
1829
  instance. It should be extended to better handle feedback and
1830
  interaction functions.
1831

1832
  """
1833
  if cl is None:
1834
    cl = GetClient()
1835

    
1836
  SetGenericOpcodeOpts([op], opts)
1837

    
1838
  job_id = SendJob([op], cl=cl)
1839

    
1840
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1841
                       reporter=reporter)
1842

    
1843
  return op_results[0]
1844

    
1845

    
1846
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1847
  """Wrapper around SubmitOpCode or SendJob.
1848

1849
  This function will decide, based on the 'opts' parameter, whether to
1850
  submit and wait for the result of the opcode (and return it), or
1851
  whether to just send the job and print its identifier. It is used in
1852
  order to simplify the implementation of the '--submit' option.
1853

1854
  It will also process the opcodes if we're sending the via SendJob
1855
  (otherwise SubmitOpCode does it).
1856

1857
  """
1858
  if opts and opts.submit_only:
1859
    job = [op]
1860
    SetGenericOpcodeOpts(job, opts)
1861
    job_id = SendJob(job, cl=cl)
1862
    raise JobSubmittedException(job_id)
1863
  else:
1864
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1865

    
1866

    
1867
def SetGenericOpcodeOpts(opcode_list, options):
1868
  """Processor for generic options.
1869

1870
  This function updates the given opcodes based on generic command
1871
  line options (like debug, dry-run, etc.).
1872

1873
  @param opcode_list: list of opcodes
1874
  @param options: command line options or None
1875
  @return: None (in-place modification)
1876

1877
  """
1878
  if not options:
1879
    return
1880
  for op in opcode_list:
1881
    op.debug_level = options.debug
1882
    if hasattr(options, "dry_run"):
1883
      op.dry_run = options.dry_run
1884
    if getattr(options, "priority", None) is not None:
1885
      op.priority = _PRIONAME_TO_VALUE[options.priority]
1886

    
1887

    
1888
def GetClient():
1889
  # TODO: Cache object?
1890
  try:
1891
    client = luxi.Client()
1892
  except luxi.NoMasterError:
1893
    ss = ssconf.SimpleStore()
1894

    
1895
    # Try to read ssconf file
1896
    try:
1897
      ss.GetMasterNode()
1898
    except errors.ConfigurationError:
1899
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1900
                                 " not part of a cluster")
1901

    
1902
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1903
    if master != myself:
1904
      raise errors.OpPrereqError("This is not the master node, please connect"
1905
                                 " to node '%s' and rerun the command" %
1906
                                 master)
1907
    raise
1908
  return client
1909

    
1910

    
1911
def FormatError(err):
1912
  """Return a formatted error message for a given error.
1913

1914
  This function takes an exception instance and returns a tuple
1915
  consisting of two values: first, the recommended exit code, and
1916
  second, a string describing the error message (not
1917
  newline-terminated).
1918

1919
  """
1920
  retcode = 1
1921
  obuf = StringIO()
1922
  msg = str(err)
1923
  if isinstance(err, errors.ConfigurationError):
1924
    txt = "Corrupt configuration file: %s" % msg
1925
    logging.error(txt)
1926
    obuf.write(txt + "\n")
1927
    obuf.write("Aborting.")
1928
    retcode = 2
1929
  elif isinstance(err, errors.HooksAbort):
1930
    obuf.write("Failure: hooks execution failed:\n")
1931
    for node, script, out in err.args[0]:
1932
      if out:
1933
        obuf.write("  node: %s, script: %s, output: %s\n" %
1934
                   (node, script, out))
1935
      else:
1936
        obuf.write("  node: %s, script: %s (no output)\n" %
1937
                   (node, script))
1938
  elif isinstance(err, errors.HooksFailure):
1939
    obuf.write("Failure: hooks general failure: %s" % msg)
1940
  elif isinstance(err, errors.ResolverError):
1941
    this_host = netutils.Hostname.GetSysName()
1942
    if err.args[0] == this_host:
1943
      msg = "Failure: can't resolve my own hostname ('%s')"
1944
    else:
1945
      msg = "Failure: can't resolve hostname '%s'"
1946
    obuf.write(msg % err.args[0])
1947
  elif isinstance(err, errors.OpPrereqError):
1948
    if len(err.args) == 2:
1949
      obuf.write("Failure: prerequisites not met for this"
1950
               " operation:\nerror type: %s, error details:\n%s" %
1951
                 (err.args[1], err.args[0]))
1952
    else:
1953
      obuf.write("Failure: prerequisites not met for this"
1954
                 " operation:\n%s" % msg)
1955
  elif isinstance(err, errors.OpExecError):
1956
    obuf.write("Failure: command execution error:\n%s" % msg)
1957
  elif isinstance(err, errors.TagError):
1958
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
1959
  elif isinstance(err, errors.JobQueueDrainError):
1960
    obuf.write("Failure: the job queue is marked for drain and doesn't"
1961
               " accept new requests\n")
1962
  elif isinstance(err, errors.JobQueueFull):
1963
    obuf.write("Failure: the job queue is full and doesn't accept new"
1964
               " job submissions until old jobs are archived\n")
1965
  elif isinstance(err, errors.TypeEnforcementError):
1966
    obuf.write("Parameter Error: %s" % msg)
1967
  elif isinstance(err, errors.ParameterError):
1968
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
1969
  elif isinstance(err, luxi.NoMasterError):
1970
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
1971
               " and listening for connections?")
1972
  elif isinstance(err, luxi.TimeoutError):
1973
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
1974
               " been submitted and will continue to run even if the call"
1975
               " timed out. Useful commands in this situation are \"gnt-job"
1976
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
1977
    obuf.write(msg)
1978
  elif isinstance(err, luxi.PermissionError):
1979
    obuf.write("It seems you don't have permissions to connect to the"
1980
               " master daemon.\nPlease retry as a different user.")
1981
  elif isinstance(err, luxi.ProtocolError):
1982
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
1983
               "%s" % msg)
1984
  elif isinstance(err, errors.JobLost):
1985
    obuf.write("Error checking job status: %s" % msg)
1986
  elif isinstance(err, errors.QueryFilterParseError):
1987
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
1988
    obuf.write("\n".join(err.GetDetails()))
1989
  elif isinstance(err, errors.GenericError):
1990
    obuf.write("Unhandled Ganeti error: %s" % msg)
1991
  elif isinstance(err, JobSubmittedException):
1992
    obuf.write("JobID: %s\n" % err.args[0])
1993
    retcode = 0
1994
  else:
1995
    obuf.write("Unhandled exception: %s" % msg)
1996
  return retcode, obuf.getvalue().rstrip("\n")
1997

    
1998

    
1999
def GenericMain(commands, override=None, aliases=None):
2000
  """Generic main function for all the gnt-* commands.
2001

2002
  Arguments:
2003
    - commands: a dictionary with a special structure, see the design doc
2004
                for command line handling.
2005
    - override: if not None, we expect a dictionary with keys that will
2006
                override command line options; this can be used to pass
2007
                options from the scripts to generic functions
2008
    - aliases: dictionary with command aliases {'alias': 'target, ...}
2009

2010
  """
2011
  # save the program name and the entire command line for later logging
2012
  if sys.argv:
2013
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
2014
    if len(sys.argv) >= 2:
2015
      binary += " " + sys.argv[1]
2016
      old_cmdline = " ".join(sys.argv[2:])
2017
    else:
2018
      old_cmdline = ""
2019
  else:
2020
    binary = "<unknown program>"
2021
    old_cmdline = ""
2022

    
2023
  if aliases is None:
2024
    aliases = {}
2025

    
2026
  try:
2027
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
2028
  except errors.ParameterError, err:
2029
    result, err_msg = FormatError(err)
2030
    ToStderr(err_msg)
2031
    return 1
2032

    
2033
  if func is None: # parse error
2034
    return 1
2035

    
2036
  if override is not None:
2037
    for key, val in override.iteritems():
2038
      setattr(options, key, val)
2039

    
2040
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
2041
                     stderr_logging=True)
2042

    
2043
  if old_cmdline:
2044
    logging.info("run with arguments '%s'", old_cmdline)
2045
  else:
2046
    logging.info("run with no arguments")
2047

    
2048
  try:
2049
    result = func(options, args)
2050
  except (errors.GenericError, luxi.ProtocolError,
2051
          JobSubmittedException), err:
2052
    result, err_msg = FormatError(err)
2053
    logging.exception("Error during command processing")
2054
    ToStderr(err_msg)
2055
  except KeyboardInterrupt:
2056
    result = constants.EXIT_FAILURE
2057
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2058
             " might have been submitted and"
2059
             " will continue to run in the background.")
2060
  except IOError, err:
2061
    if err.errno == errno.EPIPE:
2062
      # our terminal went away, we'll exit
2063
      sys.exit(constants.EXIT_FAILURE)
2064
    else:
2065
      raise
2066

    
2067
  return result
2068

    
2069

    
2070
def ParseNicOption(optvalue):
2071
  """Parses the value of the --net option(s).
2072

2073
  """
2074
  try:
2075
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2076
  except (TypeError, ValueError), err:
2077
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2078

    
2079
  nics = [{}] * nic_max
2080
  for nidx, ndict in optvalue:
2081
    nidx = int(nidx)
2082

    
2083
    if not isinstance(ndict, dict):
2084
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2085
                                 " got %s" % (nidx, ndict))
2086

    
2087
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2088

    
2089
    nics[nidx] = ndict
2090

    
2091
  return nics
2092

    
2093

    
2094
def GenericInstanceCreate(mode, opts, args):
2095
  """Add an instance to the cluster via either creation or import.
2096

2097
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2098
  @param opts: the command line options selected by the user
2099
  @type args: list
2100
  @param args: should contain only one element, the new instance name
2101
  @rtype: int
2102
  @return: the desired exit code
2103

2104
  """
2105
  instance = args[0]
2106

    
2107
  (pnode, snode) = SplitNodeOption(opts.node)
2108

    
2109
  hypervisor = None
2110
  hvparams = {}
2111
  if opts.hypervisor:
2112
    hypervisor, hvparams = opts.hypervisor
2113

    
2114
  if opts.nics:
2115
    nics = ParseNicOption(opts.nics)
2116
  elif opts.no_nics:
2117
    # no nics
2118
    nics = []
2119
  elif mode == constants.INSTANCE_CREATE:
2120
    # default of one nic, all auto
2121
    nics = [{}]
2122
  else:
2123
    # mode == import
2124
    nics = []
2125

    
2126
  if opts.disk_template == constants.DT_DISKLESS:
2127
    if opts.disks or opts.sd_size is not None:
2128
      raise errors.OpPrereqError("Diskless instance but disk"
2129
                                 " information passed")
2130
    disks = []
2131
  else:
2132
    if (not opts.disks and not opts.sd_size
2133
        and mode == constants.INSTANCE_CREATE):
2134
      raise errors.OpPrereqError("No disk information specified")
2135
    if opts.disks and opts.sd_size is not None:
2136
      raise errors.OpPrereqError("Please use either the '--disk' or"
2137
                                 " '-s' option")
2138
    if opts.sd_size is not None:
2139
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2140

    
2141
    if opts.disks:
2142
      try:
2143
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2144
      except ValueError, err:
2145
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2146
      disks = [{}] * disk_max
2147
    else:
2148
      disks = []
2149
    for didx, ddict in opts.disks:
2150
      didx = int(didx)
2151
      if not isinstance(ddict, dict):
2152
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2153
        raise errors.OpPrereqError(msg)
2154
      elif constants.IDISK_SIZE in ddict:
2155
        if constants.IDISK_ADOPT in ddict:
2156
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2157
                                     " (disk %d)" % didx)
2158
        try:
2159
          ddict[constants.IDISK_SIZE] = \
2160
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2161
        except ValueError, err:
2162
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2163
                                     (didx, err))
2164
      elif constants.IDISK_ADOPT in ddict:
2165
        if mode == constants.INSTANCE_IMPORT:
2166
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2167
                                     " import")
2168
        ddict[constants.IDISK_SIZE] = 0
2169
      else:
2170
        raise errors.OpPrereqError("Missing size or adoption source for"
2171
                                   " disk %d" % didx)
2172
      disks[didx] = ddict
2173

    
2174
  if opts.tags is not None:
2175
    tags = opts.tags.split(",")
2176
  else:
2177
    tags = []
2178

    
2179
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2180
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2181

    
2182
  if mode == constants.INSTANCE_CREATE:
2183
    start = opts.start
2184
    os_type = opts.os
2185
    force_variant = opts.force_variant
2186
    src_node = None
2187
    src_path = None
2188
    no_install = opts.no_install
2189
    identify_defaults = False
2190
  elif mode == constants.INSTANCE_IMPORT:
2191
    start = False
2192
    os_type = None
2193
    force_variant = False
2194
    src_node = opts.src_node
2195
    src_path = opts.src_dir
2196
    no_install = None
2197
    identify_defaults = opts.identify_defaults
2198
  else:
2199
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2200

    
2201
  op = opcodes.OpInstanceCreate(instance_name=instance,
2202
                                disks=disks,
2203
                                disk_template=opts.disk_template,
2204
                                nics=nics,
2205
                                pnode=pnode, snode=snode,
2206
                                ip_check=opts.ip_check,
2207
                                name_check=opts.name_check,
2208
                                wait_for_sync=opts.wait_for_sync,
2209
                                file_storage_dir=opts.file_storage_dir,
2210
                                file_driver=opts.file_driver,
2211
                                iallocator=opts.iallocator,
2212
                                hypervisor=hypervisor,
2213
                                hvparams=hvparams,
2214
                                beparams=opts.beparams,
2215
                                osparams=opts.osparams,
2216
                                mode=mode,
2217
                                start=start,
2218
                                os_type=os_type,
2219
                                force_variant=force_variant,
2220
                                src_node=src_node,
2221
                                src_path=src_path,
2222
                                tags=tags,
2223
                                no_install=no_install,
2224
                                identify_defaults=identify_defaults)
2225

    
2226
  SubmitOrSend(op, opts)
2227
  return 0
2228

    
2229

    
2230
class _RunWhileClusterStoppedHelper:
2231
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2232

2233
  """
2234
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2235
    """Initializes this class.
2236

2237
    @type feedback_fn: callable
2238
    @param feedback_fn: Feedback function
2239
    @type cluster_name: string
2240
    @param cluster_name: Cluster name
2241
    @type master_node: string
2242
    @param master_node Master node name
2243
    @type online_nodes: list
2244
    @param online_nodes: List of names of online nodes
2245

2246
    """
2247
    self.feedback_fn = feedback_fn
2248
    self.cluster_name = cluster_name
2249
    self.master_node = master_node
2250
    self.online_nodes = online_nodes
2251

    
2252
    self.ssh = ssh.SshRunner(self.cluster_name)
2253

    
2254
    self.nonmaster_nodes = [name for name in online_nodes
2255
                            if name != master_node]
2256

    
2257
    assert self.master_node not in self.nonmaster_nodes
2258

    
2259
  def _RunCmd(self, node_name, cmd):
2260
    """Runs a command on the local or a remote machine.
2261

2262
    @type node_name: string
2263
    @param node_name: Machine name
2264
    @type cmd: list
2265
    @param cmd: Command
2266

2267
    """
2268
    if node_name is None or node_name == self.master_node:
2269
      # No need to use SSH
2270
      result = utils.RunCmd(cmd)
2271
    else:
2272
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2273

    
2274
    if result.failed:
2275
      errmsg = ["Failed to run command %s" % result.cmd]
2276
      if node_name:
2277
        errmsg.append("on node %s" % node_name)
2278
      errmsg.append(": exitcode %s and error %s" %
2279
                    (result.exit_code, result.output))
2280
      raise errors.OpExecError(" ".join(errmsg))
2281

    
2282
  def Call(self, fn, *args):
2283
    """Call function while all daemons are stopped.
2284

2285
    @type fn: callable
2286
    @param fn: Function to be called
2287

2288
    """
2289
    # Pause watcher by acquiring an exclusive lock on watcher state file
2290
    self.feedback_fn("Blocking watcher")
2291
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2292
    try:
2293
      # TODO: Currently, this just blocks. There's no timeout.
2294
      # TODO: Should it be a shared lock?
2295
      watcher_block.Exclusive(blocking=True)
2296

    
2297
      # Stop master daemons, so that no new jobs can come in and all running
2298
      # ones are finished
2299
      self.feedback_fn("Stopping master daemons")
2300
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2301
      try:
2302
        # Stop daemons on all nodes
2303
        for node_name in self.online_nodes:
2304
          self.feedback_fn("Stopping daemons on %s" % node_name)
2305
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2306

    
2307
        # All daemons are shut down now
2308
        try:
2309
          return fn(self, *args)
2310
        except Exception, err:
2311
          _, errmsg = FormatError(err)
2312
          logging.exception("Caught exception")
2313
          self.feedback_fn(errmsg)
2314
          raise
2315
      finally:
2316
        # Start cluster again, master node last
2317
        for node_name in self.nonmaster_nodes + [self.master_node]:
2318
          self.feedback_fn("Starting daemons on %s" % node_name)
2319
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2320
    finally:
2321
      # Resume watcher
2322
      watcher_block.Close()
2323

    
2324

    
2325
def RunWhileClusterStopped(feedback_fn, fn, *args):
2326
  """Calls a function while all cluster daemons are stopped.
2327

2328
  @type feedback_fn: callable
2329
  @param feedback_fn: Feedback function
2330
  @type fn: callable
2331
  @param fn: Function to be called when daemons are stopped
2332

2333
  """
2334
  feedback_fn("Gathering cluster information")
2335

    
2336
  # This ensures we're running on the master daemon
2337
  cl = GetClient()
2338

    
2339
  (cluster_name, master_node) = \
2340
    cl.QueryConfigValues(["cluster_name", "master_node"])
2341

    
2342
  online_nodes = GetOnlineNodes([], cl=cl)
2343

    
2344
  # Don't keep a reference to the client. The master daemon will go away.
2345
  del cl
2346

    
2347
  assert master_node in online_nodes
2348

    
2349
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2350
                                       online_nodes).Call(fn, *args)
2351

    
2352

    
2353
def GenerateTable(headers, fields, separator, data,
2354
                  numfields=None, unitfields=None,
2355
                  units=None):
2356
  """Prints a table with headers and different fields.
2357

2358
  @type headers: dict
2359
  @param headers: dictionary mapping field names to headers for
2360
      the table
2361
  @type fields: list
2362
  @param fields: the field names corresponding to each row in
2363
      the data field
2364
  @param separator: the separator to be used; if this is None,
2365
      the default 'smart' algorithm is used which computes optimal
2366
      field width, otherwise just the separator is used between
2367
      each field
2368
  @type data: list
2369
  @param data: a list of lists, each sublist being one row to be output
2370
  @type numfields: list
2371
  @param numfields: a list with the fields that hold numeric
2372
      values and thus should be right-aligned
2373
  @type unitfields: list
2374
  @param unitfields: a list with the fields that hold numeric
2375
      values that should be formatted with the units field
2376
  @type units: string or None
2377
  @param units: the units we should use for formatting, or None for
2378
      automatic choice (human-readable for non-separator usage, otherwise
2379
      megabytes); this is a one-letter string
2380

2381
  """
2382
  if units is None:
2383
    if separator:
2384
      units = "m"
2385
    else:
2386
      units = "h"
2387

    
2388
  if numfields is None:
2389
    numfields = []
2390
  if unitfields is None:
2391
    unitfields = []
2392

    
2393
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2394
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2395

    
2396
  format_fields = []
2397
  for field in fields:
2398
    if headers and field not in headers:
2399
      # TODO: handle better unknown fields (either revert to old
2400
      # style of raising exception, or deal more intelligently with
2401
      # variable fields)
2402
      headers[field] = field
2403
    if separator is not None:
2404
      format_fields.append("%s")
2405
    elif numfields.Matches(field):
2406
      format_fields.append("%*s")
2407
    else:
2408
      format_fields.append("%-*s")
2409

    
2410
  if separator is None:
2411
    mlens = [0 for name in fields]
2412
    format_str = " ".join(format_fields)
2413
  else:
2414
    format_str = separator.replace("%", "%%").join(format_fields)
2415

    
2416
  for row in data:
2417
    if row is None:
2418
      continue
2419
    for idx, val in enumerate(row):
2420
      if unitfields.Matches(fields[idx]):
2421
        try:
2422
          val = int(val)
2423
        except (TypeError, ValueError):
2424
          pass
2425
        else:
2426
          val = row[idx] = utils.FormatUnit(val, units)
2427
      val = row[idx] = str(val)
2428
      if separator is None:
2429
        mlens[idx] = max(mlens[idx], len(val))
2430

    
2431
  result = []
2432
  if headers:
2433
    args = []
2434
    for idx, name in enumerate(fields):
2435
      hdr = headers[name]
2436
      if separator is None:
2437
        mlens[idx] = max(mlens[idx], len(hdr))
2438
        args.append(mlens[idx])
2439
      args.append(hdr)
2440
    result.append(format_str % tuple(args))
2441

    
2442
  if separator is None:
2443
    assert len(mlens) == len(fields)
2444

    
2445
    if fields and not numfields.Matches(fields[-1]):
2446
      mlens[-1] = 0
2447

    
2448
  for line in data:
2449
    args = []
2450
    if line is None:
2451
      line = ["-" for _ in fields]
2452
    for idx in range(len(fields)):
2453
      if separator is None:
2454
        args.append(mlens[idx])
2455
      args.append(line[idx])
2456
    result.append(format_str % tuple(args))
2457

    
2458
  return result
2459

    
2460

    
2461
def _FormatBool(value):
2462
  """Formats a boolean value as a string.
2463

2464
  """
2465
  if value:
2466
    return "Y"
2467
  return "N"
2468

    
2469

    
2470
#: Default formatting for query results; (callback, align right)
2471
_DEFAULT_FORMAT_QUERY = {
2472
  constants.QFT_TEXT: (str, False),
2473
  constants.QFT_BOOL: (_FormatBool, False),
2474
  constants.QFT_NUMBER: (str, True),
2475
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2476
  constants.QFT_OTHER: (str, False),
2477
  constants.QFT_UNKNOWN: (str, False),
2478
  }
2479

    
2480

    
2481
def _GetColumnFormatter(fdef, override, unit):
2482
  """Returns formatting function for a field.
2483

2484
  @type fdef: L{objects.QueryFieldDefinition}
2485
  @type override: dict
2486
  @param override: Dictionary for overriding field formatting functions,
2487
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2488
  @type unit: string
2489
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2490
  @rtype: tuple; (callable, bool)
2491
  @return: Returns the function to format a value (takes one parameter) and a
2492
    boolean for aligning the value on the right-hand side
2493

2494
  """
2495
  fmt = override.get(fdef.name, None)
2496
  if fmt is not None:
2497
    return fmt
2498

    
2499
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2500

    
2501
  if fdef.kind == constants.QFT_UNIT:
2502
    # Can't keep this information in the static dictionary
2503
    return (lambda value: utils.FormatUnit(value, unit), True)
2504

    
2505
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2506
  if fmt is not None:
2507
    return fmt
2508

    
2509
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2510

    
2511

    
2512
class _QueryColumnFormatter:
2513
  """Callable class for formatting fields of a query.
2514

2515
  """
2516
  def __init__(self, fn, status_fn, verbose):
2517
    """Initializes this class.
2518

2519
    @type fn: callable
2520
    @param fn: Formatting function
2521
    @type status_fn: callable
2522
    @param status_fn: Function to report fields' status
2523
    @type verbose: boolean
2524
    @param verbose: whether to use verbose field descriptions or not
2525

2526
    """
2527
    self._fn = fn
2528
    self._status_fn = status_fn
2529
    self._verbose = verbose
2530

    
2531
  def __call__(self, data):
2532
    """Returns a field's string representation.
2533

2534
    """
2535
    (status, value) = data
2536

    
2537
    # Report status
2538
    self._status_fn(status)
2539

    
2540
    if status == constants.RS_NORMAL:
2541
      return self._fn(value)
2542

    
2543
    assert value is None, \
2544
           "Found value %r for abnormal status %s" % (value, status)
2545

    
2546
    return FormatResultError(status, self._verbose)
2547

    
2548

    
2549
def FormatResultError(status, verbose):
2550
  """Formats result status other than L{constants.RS_NORMAL}.
2551

2552
  @param status: The result status
2553
  @type verbose: boolean
2554
  @param verbose: Whether to return the verbose text
2555
  @return: Text of result status
2556

2557
  """
2558
  assert status != constants.RS_NORMAL, \
2559
         "FormatResultError called with status equal to constants.RS_NORMAL"
2560
  try:
2561
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2562
  except KeyError:
2563
    raise NotImplementedError("Unknown status %s" % status)
2564
  else:
2565
    if verbose:
2566
      return verbose_text
2567
    return normal_text
2568

    
2569

    
2570
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2571
                      header=False, verbose=False):
2572
  """Formats data in L{objects.QueryResponse}.
2573

2574
  @type result: L{objects.QueryResponse}
2575
  @param result: result of query operation
2576
  @type unit: string
2577
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2578
    see L{utils.text.FormatUnit}
2579
  @type format_override: dict
2580
  @param format_override: Dictionary for overriding field formatting functions,
2581
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2582
  @type separator: string or None
2583
  @param separator: String used to separate fields
2584
  @type header: bool
2585
  @param header: Whether to output header row
2586
  @type verbose: boolean
2587
  @param verbose: whether to use verbose field descriptions or not
2588

2589
  """
2590
  if unit is None:
2591
    if separator:
2592
      unit = "m"
2593
    else:
2594
      unit = "h"
2595

    
2596
  if format_override is None:
2597
    format_override = {}
2598

    
2599
  stats = dict.fromkeys(constants.RS_ALL, 0)
2600

    
2601
  def _RecordStatus(status):
2602
    if status in stats:
2603
      stats[status] += 1
2604

    
2605
  columns = []
2606
  for fdef in result.fields:
2607
    assert fdef.title and fdef.name
2608
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2609
    columns.append(TableColumn(fdef.title,
2610
                               _QueryColumnFormatter(fn, _RecordStatus,
2611
                                                     verbose),
2612
                               align_right))
2613

    
2614
  table = FormatTable(result.data, columns, header, separator)
2615

    
2616
  # Collect statistics
2617
  assert len(stats) == len(constants.RS_ALL)
2618
  assert compat.all(count >= 0 for count in stats.values())
2619

    
2620
  # Determine overall status. If there was no data, unknown fields must be
2621
  # detected via the field definitions.
2622
  if (stats[constants.RS_UNKNOWN] or
2623
      (not result.data and _GetUnknownFields(result.fields))):
2624
    status = QR_UNKNOWN
2625
  elif compat.any(count > 0 for key, count in stats.items()
2626
                  if key != constants.RS_NORMAL):
2627
    status = QR_INCOMPLETE
2628
  else:
2629
    status = QR_NORMAL
2630

    
2631
  return (status, table)
2632

    
2633

    
2634
def _GetUnknownFields(fdefs):
2635
  """Returns list of unknown fields included in C{fdefs}.
2636

2637
  @type fdefs: list of L{objects.QueryFieldDefinition}
2638

2639
  """
2640
  return [fdef for fdef in fdefs
2641
          if fdef.kind == constants.QFT_UNKNOWN]
2642

    
2643

    
2644
def _WarnUnknownFields(fdefs):
2645
  """Prints a warning to stderr if a query included unknown fields.
2646

2647
  @type fdefs: list of L{objects.QueryFieldDefinition}
2648

2649
  """
2650
  unknown = _GetUnknownFields(fdefs)
2651
  if unknown:
2652
    ToStderr("Warning: Queried for unknown fields %s",
2653
             utils.CommaJoin(fdef.name for fdef in unknown))
2654
    return True
2655

    
2656
  return False
2657

    
2658

    
2659
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2660
                format_override=None, verbose=False, force_filter=False):
2661
  """Generic implementation for listing all items of a resource.
2662

2663
  @param resource: One of L{constants.QR_VIA_LUXI}
2664
  @type fields: list of strings
2665
  @param fields: List of fields to query for
2666
  @type names: list of strings
2667
  @param names: Names of items to query for
2668
  @type unit: string or None
2669
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2670
    None for automatic choice (human-readable for non-separator usage,
2671
    otherwise megabytes); this is a one-letter string
2672
  @type separator: string or None
2673
  @param separator: String used to separate fields
2674
  @type header: bool
2675
  @param header: Whether to show header row
2676
  @type force_filter: bool
2677
  @param force_filter: Whether to always treat names as filter
2678
  @type format_override: dict
2679
  @param format_override: Dictionary for overriding field formatting functions,
2680
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2681
  @type verbose: boolean
2682
  @param verbose: whether to use verbose field descriptions or not
2683

2684
  """
2685
  if cl is None:
2686
    cl = GetClient()
2687

    
2688
  if not names:
2689
    names = None
2690

    
2691
  filter_ = qlang.MakeFilter(names, force_filter)
2692

    
2693
  response = cl.Query(resource, fields, filter_)
2694

    
2695
  found_unknown = _WarnUnknownFields(response.fields)
2696

    
2697
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2698
                                     header=header,
2699
                                     format_override=format_override,
2700
                                     verbose=verbose)
2701

    
2702
  for line in data:
2703
    ToStdout(line)
2704

    
2705
  assert ((found_unknown and status == QR_UNKNOWN) or
2706
          (not found_unknown and status != QR_UNKNOWN))
2707

    
2708
  if status == QR_UNKNOWN:
2709
    return constants.EXIT_UNKNOWN_FIELD
2710

    
2711
  # TODO: Should the list command fail if not all data could be collected?
2712
  return constants.EXIT_SUCCESS
2713

    
2714

    
2715
def GenericListFields(resource, fields, separator, header, cl=None):
2716
  """Generic implementation for listing fields for a resource.
2717

2718
  @param resource: One of L{constants.QR_VIA_LUXI}
2719
  @type fields: list of strings
2720
  @param fields: List of fields to query for
2721
  @type separator: string or None
2722
  @param separator: String used to separate fields
2723
  @type header: bool
2724
  @param header: Whether to show header row
2725

2726
  """
2727
  if cl is None:
2728
    cl = GetClient()
2729

    
2730
  if not fields:
2731
    fields = None
2732

    
2733
  response = cl.QueryFields(resource, fields)
2734

    
2735
  found_unknown = _WarnUnknownFields(response.fields)
2736

    
2737
  columns = [
2738
    TableColumn("Name", str, False),
2739
    TableColumn("Title", str, False),
2740
    TableColumn("Description", str, False),
2741
    ]
2742

    
2743
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2744

    
2745
  for line in FormatTable(rows, columns, header, separator):
2746
    ToStdout(line)
2747

    
2748
  if found_unknown:
2749
    return constants.EXIT_UNKNOWN_FIELD
2750

    
2751
  return constants.EXIT_SUCCESS
2752

    
2753

    
2754
class TableColumn:
2755
  """Describes a column for L{FormatTable}.
2756

2757
  """
2758
  def __init__(self, title, fn, align_right):
2759
    """Initializes this class.
2760

2761
    @type title: string
2762
    @param title: Column title
2763
    @type fn: callable
2764
    @param fn: Formatting function
2765
    @type align_right: bool
2766
    @param align_right: Whether to align values on the right-hand side
2767

2768
    """
2769
    self.title = title
2770
    self.format = fn
2771
    self.align_right = align_right
2772

    
2773

    
2774
def _GetColFormatString(width, align_right):
2775
  """Returns the format string for a field.
2776

2777
  """
2778
  if align_right:
2779
    sign = ""
2780
  else:
2781
    sign = "-"
2782

    
2783
  return "%%%s%ss" % (sign, width)
2784

    
2785

    
2786
def FormatTable(rows, columns, header, separator):
2787
  """Formats data as a table.
2788

2789
  @type rows: list of lists
2790
  @param rows: Row data, one list per row
2791
  @type columns: list of L{TableColumn}
2792
  @param columns: Column descriptions
2793
  @type header: bool
2794
  @param header: Whether to show header row
2795
  @type separator: string or None
2796
  @param separator: String used to separate columns
2797

2798
  """
2799
  if header:
2800
    data = [[col.title for col in columns]]
2801
    colwidth = [len(col.title) for col in columns]
2802
  else:
2803
    data = []
2804
    colwidth = [0 for _ in columns]
2805

    
2806
  # Format row data
2807
  for row in rows:
2808
    assert len(row) == len(columns)
2809

    
2810
    formatted = [col.format(value) for value, col in zip(row, columns)]
2811

    
2812
    if separator is None:
2813
      # Update column widths
2814
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
2815
        # Modifying a list's items while iterating is fine
2816
        colwidth[idx] = max(oldwidth, len(value))
2817

    
2818
    data.append(formatted)
2819

    
2820
  if separator is not None:
2821
    # Return early if a separator is used
2822
    return [separator.join(row) for row in data]
2823

    
2824
  if columns and not columns[-1].align_right:
2825
    # Avoid unnecessary spaces at end of line
2826
    colwidth[-1] = 0
2827

    
2828
  # Build format string
2829
  fmt = " ".join([_GetColFormatString(width, col.align_right)
2830
                  for col, width in zip(columns, colwidth)])
2831

    
2832
  return [fmt % tuple(row) for row in data]
2833

    
2834

    
2835
def FormatTimestamp(ts):
2836
  """Formats a given timestamp.
2837

2838
  @type ts: timestamp
2839
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
2840

2841
  @rtype: string
2842
  @return: a string with the formatted timestamp
2843

2844
  """
2845
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
2846
    return "?"
2847
  sec, usec = ts
2848
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
2849

    
2850

    
2851
def ParseTimespec(value):
2852
  """Parse a time specification.
2853

2854
  The following suffixed will be recognized:
2855

2856
    - s: seconds
2857
    - m: minutes
2858
    - h: hours
2859
    - d: day
2860
    - w: weeks
2861

2862
  Without any suffix, the value will be taken to be in seconds.
2863

2864
  """
2865
  value = str(value)
2866
  if not value:
2867
    raise errors.OpPrereqError("Empty time specification passed")
2868
  suffix_map = {
2869
    "s": 1,
2870
    "m": 60,
2871
    "h": 3600,
2872
    "d": 86400,
2873
    "w": 604800,
2874
    }
2875
  if value[-1] not in suffix_map:
2876
    try:
2877
      value = int(value)
2878
    except (TypeError, ValueError):
2879
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2880
  else:
2881
    multiplier = suffix_map[value[-1]]
2882
    value = value[:-1]
2883
    if not value: # no data left after stripping the suffix
2884
      raise errors.OpPrereqError("Invalid time specification (only"
2885
                                 " suffix passed)")
2886
    try:
2887
      value = int(value) * multiplier
2888
    except (TypeError, ValueError):
2889
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2890
  return value


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  filter_ = []

  if nodes:
    filter_.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    filter_.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    filter_.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if filter_:
    if len(filter_) > 1:
      final_filter = [qlang.OP_AND] + filter_
    else:
      assert len(filter_) == 1
      final_filter = filter_[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
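# Example (illustrative sketch; node names below are made up): callers either
# restrict the query to an explicit node list or pass an empty list to cover
# the whole cluster:
#   cl = GetClient()
#   all_online = GetOnlineNodes([], cl=cl)
#   repl_ips = GetOnlineNodes(["node1.example.com", "node2.example.com"],
#                             cl=cl, secondary_ips=True, filter_master=True)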


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
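# Examples (illustrative sketch): both wrappers take optional printf-style
# arguments, which are only interpolated when actually given:
#   ToStdout("Instance %s has %d disks", "instance1.example.com", 2)
#   ToStderr("Failure: %s", "connection to master daemon lost")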


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in the job
        progress and result messages

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob does not return a (success, data) pair like SubmitManyJobs,
        # but it raises an exception if the submission fails, so we'll notice
        # failures anyway.
        results.append([True, self.cl.SubmitJob(ops)])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
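# Example (illustrative sketch; the opcode and instance names are placeholders):
# queue one job per instance, let GetResults submit them all at once and then
# wait for every result:
#   je = JobExecutor(opts=opts)
#   for iname in ["instance1", "instance2"]:
#     je.QueueJob(iname, opcodes.OpInstanceStartup(instance_name=iname))
#   for (success, result) in je.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)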


def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the explicitly-set (own) parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
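# Example (illustrative sketch): parameters missing from param_dict are shown
# as coming from the defaults in "actual":
#   buf = StringIO()
#   FormatParameterDict(buf, {"mem": 512}, {"mem": 512, "vcpus": 1})
#   # buf.getvalue() == "  - mem: 512\n  - vcpus: default (1)\n"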


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
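# Example (illustrative sketch; names and operation text are placeholders):
#   if not ConfirmOperation(["node1", "node2"], "nodes", "node removal"):
#     return constants.EXIT_FAILURE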