
root / lib / cli.py @ 519fafa1


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
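
# Hedged illustration (comment only, not part of the original module): these
# lists describe the positional arguments a command accepts; ARGS_ONE_INSTANCE
# means exactly one instance name, ARGS_MANY_NODES means zero or more node
# names. A client script would typically pair such a list with an option list
# in its command table, along the lines of (names below are made up):
#
#   "info": (ShowInstanceConfig, ARGS_MANY_INSTANCES, [STATIC_OPT, ALL_OPT],
#            "[-s] [--all] [<instance>...]", "Show instance information"),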


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
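
# Hedged illustration (comment only, not part of the original module): for a
# tag command invoked as "gnt-instance add-tags inst1.example.com mytag",
# opts.tag_type is constants.TAG_INSTANCE and args is
# ["inst1.example.com", "mytag"]; _ExtractTagsObject then returns
# (constants.TAG_INSTANCE, "inst1.example.com") and leaves args == ["mytag"],
# since the object name is popped from the front of args.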


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys which have the
  prefix '-' will have the value=None and the prefix stripped, and the
  others will have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
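
# Hedged illustration (comment only, not part of the original module): given
# opt="-B" and data="memory=512,no_auto_balance,-vcpus,always_failover",
# _SplitKeyVal returns
#   {"memory": "512", "auto_balance": False, "vcpus": None,
#    "always_failover": True}
# i.e. "no_"-prefixed keys become False, "-"-prefixed keys become None and
# bare keys become True; duplicate keys raise errors.ParameterError.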


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
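
# Hedged illustration (comment only, not part of the original module): with
# type="identkeyval", the value "drbd:metavg=myvg,resync-rate=100" parses to
# ("drbd", {"metavg": "myvg", "resync-rate": "100"}), while "no_drbd" parses
# to ("drbd", False) and "-drbd" parses to ("drbd", None).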


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
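
# Hedged illustration (comment only, not part of the original module): for an
# option declared with type="bool", check_bool maps "yes"/"Yes" (and
# constants.VALUE_TRUE) to True, "no" (and constants.VALUE_FALSE) to False,
# and raises errors.ParameterError for anything else, e.g. "maybe".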


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used to request dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
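
# Hedged usage sketch (comment only, not part of the original module): the
# option below mirrors BACKEND_OPT defined further down; the custom "keyval"
# type is resolved through CliOption.TYPE_CHECKER when optparse parses it.
#
#   parser = OptionParser(option_class=CliOption)
#   parser.add_option(cli_option("-B", "--backend-parameters", dest="beparams",
#                                type="keyval", default={}))
#   (opts, _) = parser.parse_args(["-B", "memory=512,no_auto_balance"])
#   # opts.beparams == {"memory": "512", "auto_balance": False}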


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                 type="keyval", default={},
                                 help="Memory count specs: min, max, std"
                                 " (in MB)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: min, max, std (in MB)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: min, max, std")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                dest="use_external_mip_script",
                                help="Specify whether to run a user-provided"
                                " script for the master IP address turnup and"
                                " turndown operations",
                                type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                           default=None,
                           help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                           default=None,
                           help="File containing the certificate of the CA"
                                " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                               dest="new_spice_cert", default=None,
                               action="store_true",
                               help=("Generate a new self-signed SPICE"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names, which will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the format"
                                  " storage_type/identifier:option=value,..."),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,..."),
                          type="identkeyval")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# Common options for creating instances. The "add" and "import" commands then
# add their own specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
1364

    
1365

    
1366
def _ParseArgs(argv, commands, aliases, env_override):
1367
  """Parser for the command line arguments.
1368

1369
  This function parses the arguments and returns the function which
1370
  must be executed together with its (modified) arguments.
1371

1372
  @param argv: the command line
1373
  @param commands: dictionary with special contents, see the design
1374
      doc for cmdline handling
1375
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
1376
  @param env_override: list of env variables allowed for default args
1377

1378
  """
1379
  assert not (env_override - set(commands))
1380

    
1381
  if len(argv) == 0:
1382
    binary = "<command>"
1383
  else:
1384
    binary = argv[0].split("/")[-1]
1385

    
1386
  if len(argv) > 1 and argv[1] == "--version":
1387
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1388
             constants.RELEASE_VERSION)
1389
    # Quit right away. That way we don't have to care about this special
1390
    # argument. optparse.py does the same.
1391
    sys.exit(0)
1392

    
1393
  if len(argv) < 2 or not (argv[1] in commands or
1394
                           argv[1] in aliases):
1395
    # let's do a nice thing
1396
    sortedcmds = commands.keys()
1397
    sortedcmds.sort()
1398

    
1399
    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1400
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1401
    ToStdout("")
1402

    
1403
    # compute the max line length for cmd + usage
1404
    mlen = max([len(" %s" % cmd) for cmd in commands])
1405
    mlen = min(60, mlen) # should not get here...
1406

    
1407
    # and format a nice command list
1408
    ToStdout("Commands:")
1409
    for cmd in sortedcmds:
1410
      cmdstr = " %s" % (cmd,)
1411
      help_text = commands[cmd][4]
1412
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1413
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1414
      for line in help_lines:
1415
        ToStdout("%-*s   %s", mlen, "", line)
1416

    
1417
    ToStdout("")
1418

    
1419
    return None, None, None
1420

    
1421
  # get command, unalias it, and look it up in commands
1422
  cmd = argv.pop(1)
1423
  if cmd in aliases:
1424
    if cmd in commands:
1425
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
1426
                                   " command" % cmd)
1427

    
1428
    if aliases[cmd] not in commands:
1429
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1430
                                   " command '%s'" % (cmd, aliases[cmd]))
1431

    
1432
    cmd = aliases[cmd]
1433

    
1434
  if cmd in env_override:
1435
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1436
    env_args = os.environ.get(args_env_name)
1437
    if env_args:
1438
      argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))
1439

    
1440
  func, args_def, parser_opts, usage, description = commands[cmd]
1441
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1442
                        description=description,
1443
                        formatter=TitledHelpFormatter(),
1444
                        usage="%%prog %s %s" % (cmd, usage))
1445
  parser.disable_interspersed_args()
1446
  options, args = parser.parse_args(args=argv[1:])
1447

    
1448
  if not _CheckArguments(cmd, args_def, args):
1449
    return None, None, None
1450

    
1451
  return func, options, args
1452

    
1453

    
1454
def _CheckArguments(cmd, args_def, args):
1455
  """Verifies the arguments using the argument definition.
1456

1457
  Algorithm:
1458

1459
    1. Abort with error if values specified by user but none expected.
1460

1461
    1. For each argument in definition
1462

1463
      1. Keep running count of minimum number of values (min_count)
1464
      1. Keep running count of maximum number of values (max_count)
1465
      1. If it has an unlimited number of values
1466

1467
        1. Abort with error if it's not the last argument in the definition
1468

1469
    1. If last argument has limited number of values
1470

1471
      1. Abort with error if number of values doesn't match or is too large
1472

1473
    1. Abort with error if user didn't pass enough values (min_count)
1474

1475
  """
1476
  if args and not args_def:
1477
    ToStderr("Error: Command %s expects no arguments", cmd)
1478
    return False
1479

    
1480
  min_count = None
1481
  max_count = None
1482
  check_max = None
1483

    
1484
  last_idx = len(args_def) - 1
1485

    
1486
  for idx, arg in enumerate(args_def):
1487
    if min_count is None:
1488
      min_count = arg.min
1489
    elif arg.min is not None:
1490
      min_count += arg.min
1491

    
1492
    if max_count is None:
1493
      max_count = arg.max
1494
    elif arg.max is not None:
1495
      max_count += arg.max
1496

    
1497
    if idx == last_idx:
1498
      check_max = (arg.max is not None)
1499

    
1500
    elif arg.max is None:
1501
      raise errors.ProgrammerError("Only the last argument can have max=None")
1502

    
1503
  if check_max:
1504
    # Command with exact number of arguments
1505
    if (min_count is not None and max_count is not None and
1506
        min_count == max_count and len(args) != min_count):
1507
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1508
      return False
1509

    
1510
    # Command with limited number of arguments
1511
    if max_count is not None and len(args) > max_count:
1512
      ToStderr("Error: Command %s expects only %d argument(s)",
1513
               cmd, max_count)
1514
      return False
1515

    
1516
  # Command with some required arguments
1517
  if min_count is not None and len(args) < min_count:
1518
    ToStderr("Error: Command %s expects at least %d argument(s)",
1519
             cmd, min_count)
1520
    return False
1521

    
1522
  return True
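
# Illustrative sketch (not part of the original code), assuming the Arg*
# argument-definition classes declared earlier in this module; the command
# name and argument values are made up.
#
#   args_def = [ArgInstance(min=1, max=1), ArgNode(min=0, max=None)]
#   # exactly one instance name, followed by any number of node names:
#   _CheckArguments("cmd", args_def, ["inst1"])           # => True
#   _CheckArguments("cmd", args_def, ["inst1", "nodeA"])  # => True
#   _CheckArguments("cmd", args_def, [])                  # prints error, False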
1523

    
1524

    
1525
def SplitNodeOption(value):
1526
  """Splits the value of a --node option.
1527

1528
  """
1529
  if value and ":" in value:
1530
    return value.split(":", 1)
1531
  else:
1532
    return (value, None)
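
# Illustrative sketch (not part of the original code): how SplitNodeOption
# interprets the value of the --node option; the node names are made up.
#
#   SplitNodeOption("node1.example.com:node2.example.com")
#     => ("node1.example.com", "node2.example.com")
#   SplitNodeOption("node1.example.com")
#     => ("node1.example.com", None)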
1533

    
1534

    
1535
def CalculateOSNames(os_name, os_variants):
1536
  """Calculates all the names an OS can be called, according to its variants.
1537

1538
  @type os_name: string
1539
  @param os_name: base name of the os
1540
  @type os_variants: list or None
1541
  @param os_variants: list of supported variants
1542
  @rtype: list
1543
  @return: list of valid names
1544

1545
  """
1546
  if os_variants:
1547
    return ["%s+%s" % (os_name, v) for v in os_variants]
1548
  else:
1549
    return [os_name]
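
# Illustrative sketch (not part of the original code); the OS and variant
# names below are examples only.
#
#   CalculateOSNames("debian-image", ["squeeze", "wheezy"])
#     => ["debian-image+squeeze", "debian-image+wheezy"]
#   CalculateOSNames("debian-image", None)
#     => ["debian-image"]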
1550

    
1551

    
1552
def ParseFields(selected, default):
1553
  """Parses the values of "--field"-like options.
1554

1555
  @type selected: string or None
1556
  @param selected: User-selected options
1557
  @type default: list
1558
  @param default: Default fields
1559

1560
  """
1561
  if selected is None:
1562
    return default
1563

    
1564
  if selected.startswith("+"):
1565
    return default + selected[1:].split(",")
1566

    
1567
  return selected.split(",")
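
# Illustrative sketch (not part of the original code); the field names are
# examples only.
#
#   ParseFields(None, ["name", "status"])          => ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"])   => ["name", "status", "oper_ram"]
#   ParseFields("name,pnode", ["name", "status"])  => ["name", "pnode"]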
1568

    
1569

    
1570
UsesRPC = rpc.RunWithRPC
1571

    
1572

    
1573
def AskUser(text, choices=None):
1574
  """Ask the user a question.
1575

1576
  @param text: the question to ask
1577

1578
  @param choices: list with elements tuples (input_char, return_value,
1579
      description); if not given, it will default to: [('y', True,
1580
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1581
      note that the '?' char is reserved for help
1582

1583
  @return: one of the return values from the choices list; if input is
1584
      not possible (i.e. not running with a tty), we return the last
1585
      entry from the list
1586

1587
  """
1588
  if choices is None:
1589
    choices = [("y", True, "Perform the operation"),
1590
               ("n", False, "Do not perform the operation")]
1591
  if not choices or not isinstance(choices, list):
1592
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1593
  for entry in choices:
1594
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1595
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1596

    
1597
  answer = choices[-1][1]
1598
  new_text = []
1599
  for line in text.splitlines():
1600
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1601
  text = "\n".join(new_text)
1602
  try:
1603
    f = file("/dev/tty", "a+")
1604
  except IOError:
1605
    return answer
1606
  try:
1607
    chars = [entry[0] for entry in choices]
1608
    chars[-1] = "[%s]" % chars[-1]
1609
    chars.append("?")
1610
    maps = dict([(entry[0], entry[1]) for entry in choices])
1611
    while True:
1612
      f.write(text)
1613
      f.write("\n")
1614
      f.write("/".join(chars))
1615
      f.write(": ")
1616
      line = f.readline(2).strip().lower()
1617
      if line in maps:
1618
        answer = maps[line]
1619
        break
1620
      elif line == "?":
1621
        for entry in choices:
1622
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1623
        f.write("\n")
1624
        continue
1625
  finally:
1626
    f.close()
1627
  return answer
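
# Illustrative sketch (not part of the original code): a custom choices list
# for AskUser; the question text and the "a"/"all" choice are made up.
#
#   choices = [("y", True, "Perform the operation"),
#              ("a", "all", "Perform the operation on all items"),
#              ("n", False, "Do not perform the operation")]
#   response = AskUser("Continue with the operation?", choices)
#   # Without a controlling tty, the last entry's value (False) is returned.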
1628

    
1629

    
1630
class JobSubmittedException(Exception):
1631
  """Job was submitted, client should exit.
1632

1633
  This exception has one argument, the ID of the job that was
1634
  submitted. The handler should print this ID.
1635

1636
  This is not an error, just a structured way to exit from clients.
1637

1638
  """
1639

    
1640

    
1641
def SendJob(ops, cl=None):
1642
  """Function to submit an opcode without waiting for the results.
1643

1644
  @type ops: list
1645
  @param ops: list of opcodes
1646
  @type cl: luxi.Client
1647
  @param cl: the luxi client to use for communicating with the master;
1648
             if None, a new client will be created
1649

1650
  """
1651
  if cl is None:
1652
    cl = GetClient()
1653

    
1654
  job_id = cl.SubmitJob(ops)
1655

    
1656
  return job_id
1657

    
1658

    
1659
def GenericPollJob(job_id, cbs, report_cbs):
1660
  """Generic job-polling function.
1661

1662
  @type job_id: number
1663
  @param job_id: Job ID
1664
  @type cbs: Instance of L{JobPollCbBase}
1665
  @param cbs: Data callbacks
1666
  @type report_cbs: Instance of L{JobPollReportCbBase}
1667
  @param report_cbs: Reporting callbacks
1668

1669
  """
1670
  prev_job_info = None
1671
  prev_logmsg_serial = None
1672

    
1673
  status = None
1674

    
1675
  while True:
1676
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1677
                                      prev_logmsg_serial)
1678
    if not result:
1679
      # job not found, go away!
1680
      raise errors.JobLost("Job with id %s lost" % job_id)
1681

    
1682
    if result == constants.JOB_NOTCHANGED:
1683
      report_cbs.ReportNotChanged(job_id, status)
1684

    
1685
      # Wait again
1686
      continue
1687

    
1688
    # Split result, a tuple of (field values, log entries)
1689
    (job_info, log_entries) = result
1690
    (status, ) = job_info
1691

    
1692
    if log_entries:
1693
      for log_entry in log_entries:
1694
        (serial, timestamp, log_type, message) = log_entry
1695
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1696
                                    log_type, message)
1697
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1698

    
1699
    # TODO: Handle canceled and archived jobs
1700
    elif status in (constants.JOB_STATUS_SUCCESS,
1701
                    constants.JOB_STATUS_ERROR,
1702
                    constants.JOB_STATUS_CANCELING,
1703
                    constants.JOB_STATUS_CANCELED):
1704
      break
1705

    
1706
    prev_job_info = job_info
1707

    
1708
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1709
  if not jobs:
1710
    raise errors.JobLost("Job with id %s lost" % job_id)
1711

    
1712
  status, opstatus, result = jobs[0]
1713

    
1714
  if status == constants.JOB_STATUS_SUCCESS:
1715
    return result
1716

    
1717
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1718
    raise errors.OpExecError("Job was canceled")
1719

    
1720
  has_ok = False
1721
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1722
    if status == constants.OP_STATUS_SUCCESS:
1723
      has_ok = True
1724
    elif status == constants.OP_STATUS_ERROR:
1725
      errors.MaybeRaise(msg)
1726

    
1727
      if has_ok:
1728
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1729
                                 (idx, msg))
1730

    
1731
      raise errors.OpExecError(str(msg))
1732

    
1733
  # default failure mode
1734
  raise errors.OpExecError(result)
1735

    
1736

    
1737
class JobPollCbBase:
1738
  """Base class for L{GenericPollJob} callbacks.
1739

1740
  """
1741
  def __init__(self):
1742
    """Initializes this class.
1743

1744
    """
1745

    
1746
  def WaitForJobChangeOnce(self, job_id, fields,
1747
                           prev_job_info, prev_log_serial):
1748
    """Waits for changes on a job.
1749

1750
    """
1751
    raise NotImplementedError()
1752

    
1753
  def QueryJobs(self, job_ids, fields):
1754
    """Returns the selected fields for the selected job IDs.
1755

1756
    @type job_ids: list of numbers
1757
    @param job_ids: Job IDs
1758
    @type fields: list of strings
1759
    @param fields: Fields
1760

1761
    """
1762
    raise NotImplementedError()
1763

    
1764

    
1765
class JobPollReportCbBase:
1766
  """Base class for L{GenericPollJob} reporting callbacks.
1767

1768
  """
1769
  def __init__(self):
1770
    """Initializes this class.
1771

1772
    """
1773

    
1774
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1775
    """Handles a log message.
1776

1777
    """
1778
    raise NotImplementedError()
1779

    
1780
  def ReportNotChanged(self, job_id, status):
1781
    """Called for if a job hasn't changed in a while.
1782

1783
    @type job_id: number
1784
    @param job_id: Job ID
1785
    @type status: string or None
1786
    @param status: Job status if available
1787

1788
    """
1789
    raise NotImplementedError()
1790

    
1791

    
1792
class _LuxiJobPollCb(JobPollCbBase):
1793
  def __init__(self, cl):
1794
    """Initializes this class.
1795

1796
    """
1797
    JobPollCbBase.__init__(self)
1798
    self.cl = cl
1799

    
1800
  def WaitForJobChangeOnce(self, job_id, fields,
1801
                           prev_job_info, prev_log_serial):
1802
    """Waits for changes on a job.
1803

1804
    """
1805
    return self.cl.WaitForJobChangeOnce(job_id, fields,
1806
                                        prev_job_info, prev_log_serial)
1807

    
1808
  def QueryJobs(self, job_ids, fields):
1809
    """Returns the selected fields for the selected job IDs.
1810

1811
    """
1812
    return self.cl.QueryJobs(job_ids, fields)
1813

    
1814

    
1815
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1816
  def __init__(self, feedback_fn):
1817
    """Initializes this class.
1818

1819
    """
1820
    JobPollReportCbBase.__init__(self)
1821

    
1822
    self.feedback_fn = feedback_fn
1823

    
1824
    assert callable(feedback_fn)
1825

    
1826
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1827
    """Handles a log message.
1828

1829
    """
1830
    self.feedback_fn((timestamp, log_type, log_msg))
1831

    
1832
  def ReportNotChanged(self, job_id, status):
1833
    """Called if a job hasn't changed in a while.
1834

1835
    """
1836
    # Ignore
1837

    
1838

    
1839
class StdioJobPollReportCb(JobPollReportCbBase):
1840
  def __init__(self):
1841
    """Initializes this class.
1842

1843
    """
1844
    JobPollReportCbBase.__init__(self)
1845

    
1846
    self.notified_queued = False
1847
    self.notified_waitlock = False
1848

    
1849
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1850
    """Handles a log message.
1851

1852
    """
1853
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
1854
             FormatLogMessage(log_type, log_msg))
1855

    
1856
  def ReportNotChanged(self, job_id, status):
1857
    """Called if a job hasn't changed in a while.
1858

1859
    """
1860
    if status is None:
1861
      return
1862

    
1863
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1864
      ToStderr("Job %s is waiting in queue", job_id)
1865
      self.notified_queued = True
1866

    
1867
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
1868
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1869
      self.notified_waitlock = True
1870

    
1871

    
1872
def FormatLogMessage(log_type, log_msg):
1873
  """Formats a job message according to its type.
1874

1875
  """
1876
  if log_type != constants.ELOG_MESSAGE:
1877
    log_msg = str(log_msg)
1878

    
1879
  return utils.SafeEncode(log_msg)
1880

    
1881

    
1882
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1883
  """Function to poll for the result of a job.
1884

1885
  @type job_id: job identifier
1886
  @param job_id: the job to poll for results
1887
  @type cl: luxi.Client
1888
  @param cl: the luxi client to use for communicating with the master;
1889
             if None, a new client will be created
1890

1891
  """
1892
  if cl is None:
1893
    cl = GetClient()
1894

    
1895
  if reporter is None:
1896
    if feedback_fn:
1897
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
1898
    else:
1899
      reporter = StdioJobPollReportCb()
1900
  elif feedback_fn:
1901
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
1902

    
1903
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1904

    
1905

    
1906
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1907
  """Legacy function to submit an opcode.
1908

1909
  This is just a simple wrapper over the construction of the processor
1910
  instance. It should be extended to better handle feedback and
1911
  interaction functions.
1912

1913
  """
1914
  if cl is None:
1915
    cl = GetClient()
1916

    
1917
  SetGenericOpcodeOpts([op], opts)
1918

    
1919
  job_id = SendJob([op], cl=cl)
1920

    
1921
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1922
                       reporter=reporter)
1923

    
1924
  return op_results[0]
1925

    
1926

    
1927
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1928
  """Wrapper around SubmitOpCode or SendJob.
1929

1930
  This function will decide, based on the 'opts' parameter, whether to
1931
  submit and wait for the result of the opcode (and return it), or
1932
  whether to just send the job and print its identifier. It is used in
1933
  order to simplify the implementation of the '--submit' option.
1934

1935
  It will also process the opcodes if we're sending them via SendJob
1936
  (otherwise SubmitOpCode does it).
1937

1938
  """
1939
  if opts and opts.submit_only:
1940
    job = [op]
1941
    SetGenericOpcodeOpts(job, opts)
1942
    job_id = SendJob(job, cl=cl)
1943
    raise JobSubmittedException(job_id)
1944
  else:
1945
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1946

    
1947

    
1948
def SetGenericOpcodeOpts(opcode_list, options):
1949
  """Processor for generic options.
1950

1951
  This function updates the given opcodes based on generic command
1952
  line options (like debug, dry-run, etc.).
1953

1954
  @param opcode_list: list of opcodes
1955
  @param options: command line options or None
1956
  @return: None (in-place modification)
1957

1958
  """
1959
  if not options:
1960
    return
1961
  for op in opcode_list:
1962
    op.debug_level = options.debug
1963
    if hasattr(options, "dry_run"):
1964
      op.dry_run = options.dry_run
1965
    if getattr(options, "priority", None) is not None:
1966
      op.priority = _PRIONAME_TO_VALUE[options.priority]
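
# Illustrative sketch (not part of the original code): "op" stands for any
# opcode instance and "options" for the parsed command-line options.
#
#   SetGenericOpcodeOpts([op], options)
#   # op.debug_level now mirrors --debug, op.dry_run mirrors --dry-run (if
#   # present) and op.priority mirrors --priority (if given).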
1967

    
1968

    
1969
def GetClient():
1970
  # TODO: Cache object?
1971
  try:
1972
    client = luxi.Client()
1973
  except luxi.NoMasterError:
1974
    ss = ssconf.SimpleStore()
1975

    
1976
    # Try to read ssconf file
1977
    try:
1978
      ss.GetMasterNode()
1979
    except errors.ConfigurationError:
1980
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
1981
                                 " not part of a cluster")
1982

    
1983
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
1984
    if master != myself:
1985
      raise errors.OpPrereqError("This is not the master node, please connect"
1986
                                 " to node '%s' and rerun the command" %
1987
                                 master)
1988
    raise
1989
  return client
1990

    
1991

    
1992
def FormatError(err):
1993
  """Return a formatted error message for a given error.
1994

1995
  This function takes an exception instance and returns a tuple
1996
  consisting of two values: first, the recommended exit code, and
1997
  second, a string describing the error message (not
1998
  newline-terminated).
1999

2000
  """
2001
  retcode = 1
2002
  obuf = StringIO()
2003
  msg = str(err)
2004
  if isinstance(err, errors.ConfigurationError):
2005
    txt = "Corrupt configuration file: %s" % msg
2006
    logging.error(txt)
2007
    obuf.write(txt + "\n")
2008
    obuf.write("Aborting.")
2009
    retcode = 2
2010
  elif isinstance(err, errors.HooksAbort):
2011
    obuf.write("Failure: hooks execution failed:\n")
2012
    for node, script, out in err.args[0]:
2013
      if out:
2014
        obuf.write("  node: %s, script: %s, output: %s\n" %
2015
                   (node, script, out))
2016
      else:
2017
        obuf.write("  node: %s, script: %s (no output)\n" %
2018
                   (node, script))
2019
  elif isinstance(err, errors.HooksFailure):
2020
    obuf.write("Failure: hooks general failure: %s" % msg)
2021
  elif isinstance(err, errors.ResolverError):
2022
    this_host = netutils.Hostname.GetSysName()
2023
    if err.args[0] == this_host:
2024
      msg = "Failure: can't resolve my own hostname ('%s')"
2025
    else:
2026
      msg = "Failure: can't resolve hostname '%s'"
2027
    obuf.write(msg % err.args[0])
2028
  elif isinstance(err, errors.OpPrereqError):
2029
    if len(err.args) == 2:
2030
      obuf.write("Failure: prerequisites not met for this"
2031
               " operation:\nerror type: %s, error details:\n%s" %
2032
                 (err.args[1], err.args[0]))
2033
    else:
2034
      obuf.write("Failure: prerequisites not met for this"
2035
                 " operation:\n%s" % msg)
2036
  elif isinstance(err, errors.OpExecError):
2037
    obuf.write("Failure: command execution error:\n%s" % msg)
2038
  elif isinstance(err, errors.TagError):
2039
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2040
  elif isinstance(err, errors.JobQueueDrainError):
2041
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2042
               " accept new requests\n")
2043
  elif isinstance(err, errors.JobQueueFull):
2044
    obuf.write("Failure: the job queue is full and doesn't accept new"
2045
               " job submissions until old jobs are archived\n")
2046
  elif isinstance(err, errors.TypeEnforcementError):
2047
    obuf.write("Parameter Error: %s" % msg)
2048
  elif isinstance(err, errors.ParameterError):
2049
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2050
  elif isinstance(err, luxi.NoMasterError):
2051
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
2052
               " and listening for connections?")
2053
  elif isinstance(err, luxi.TimeoutError):
2054
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2055
               " been submitted and will continue to run even if the call"
2056
               " timed out. Useful commands in this situation are \"gnt-job"
2057
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2058
    obuf.write(msg)
2059
  elif isinstance(err, luxi.PermissionError):
2060
    obuf.write("It seems you don't have permissions to connect to the"
2061
               " master daemon.\nPlease retry as a different user.")
2062
  elif isinstance(err, luxi.ProtocolError):
2063
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2064
               "%s" % msg)
2065
  elif isinstance(err, errors.JobLost):
2066
    obuf.write("Error checking job status: %s" % msg)
2067
  elif isinstance(err, errors.QueryFilterParseError):
2068
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2069
    obuf.write("\n".join(err.GetDetails()))
2070
  elif isinstance(err, errors.GenericError):
2071
    obuf.write("Unhandled Ganeti error: %s" % msg)
2072
  elif isinstance(err, JobSubmittedException):
2073
    obuf.write("JobID: %s\n" % err.args[0])
2074
    retcode = 0
2075
  else:
2076
    obuf.write("Unhandled exception: %s" % msg)
2077
  return retcode, obuf.getvalue().rstrip("\n")
2078

    
2079

    
2080
def GenericMain(commands, override=None, aliases=None,
2081
                env_override=frozenset()):
2082
  """Generic main function for all the gnt-* commands.
2083

2084
  @param commands: a dictionary with a special structure, see the design doc
2085
                   for command line handling.
2086
  @param override: if not None, we expect a dictionary with keys that will
2087
                   override command line options; this can be used to pass
2088
                   options from the scripts to generic functions
2089
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2090
  @param env_override: list of environment names which are allowed to submit
2091
                       default args for commands
2092

2093
  """
2094
  # save the program name and the entire command line for later logging
2095
  if sys.argv:
2096
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
2097
    if len(sys.argv) >= 2:
2098
      binary += " " + sys.argv[1]
2099
      old_cmdline = " ".join(sys.argv[2:])
2100
    else:
2101
      old_cmdline = ""
2102
  else:
2103
    binary = "<unknown program>"
2104
    old_cmdline = ""
2105

    
2106
  if aliases is None:
2107
    aliases = {}
2108

    
2109
  try:
2110
    func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
2111
  except errors.ParameterError, err:
2112
    result, err_msg = FormatError(err)
2113
    ToStderr(err_msg)
2114
    return 1
2115

    
2116
  if func is None: # parse error
2117
    return 1
2118

    
2119
  if override is not None:
2120
    for key, val in override.iteritems():
2121
      setattr(options, key, val)
2122

    
2123
  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
2124
                     stderr_logging=True)
2125

    
2126
  if old_cmdline:
2127
    logging.info("run with arguments '%s'", old_cmdline)
2128
  else:
2129
    logging.info("run with no arguments")
2130

    
2131
  try:
2132
    result = func(options, args)
2133
  except (errors.GenericError, luxi.ProtocolError,
2134
          JobSubmittedException), err:
2135
    result, err_msg = FormatError(err)
2136
    logging.exception("Error during command processing")
2137
    ToStderr(err_msg)
2138
  except KeyboardInterrupt:
2139
    result = constants.EXIT_FAILURE
2140
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2141
             " might have been submitted and"
2142
             " will continue to run in the background.")
2143
  except IOError, err:
2144
    if err.errno == errno.EPIPE:
2145
      # our terminal went away, we'll exit
2146
      sys.exit(constants.EXIT_FAILURE)
2147
    else:
2148
      raise
2149

    
2150
  return result
2151

    
2152

    
2153
def ParseNicOption(optvalue):
2154
  """Parses the value of the --net option(s).
2155

2156
  """
2157
  try:
2158
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2159
  except (TypeError, ValueError), err:
2160
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
2161

    
2162
  nics = [{}] * nic_max
2163
  for nidx, ndict in optvalue:
2164
    nidx = int(nidx)
2165

    
2166
    if not isinstance(ndict, dict):
2167
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2168
                                 " got %s" % (nidx, ndict))
2169

    
2170
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2171

    
2172
    nics[nidx] = ndict
2173

    
2174
  return nics
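
# Illustrative sketch (not part of the original code); the NIC parameters
# below are examples only.
#
#   ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#     => [{"link": "br0"}, {}, {"mode": "routed"}]
#   # Indices not given on the command line remain empty dicts (note that
#   # they all share one dict object, since the list is built with "*").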
2175

    
2176

    
2177
def GenericInstanceCreate(mode, opts, args):
2178
  """Add an instance to the cluster via either creation or import.
2179

2180
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2181
  @param opts: the command line options selected by the user
2182
  @type args: list
2183
  @param args: should contain only one element, the new instance name
2184
  @rtype: int
2185
  @return: the desired exit code
2186

2187
  """
2188
  instance = args[0]
2189

    
2190
  (pnode, snode) = SplitNodeOption(opts.node)
2191

    
2192
  hypervisor = None
2193
  hvparams = {}
2194
  if opts.hypervisor:
2195
    hypervisor, hvparams = opts.hypervisor
2196

    
2197
  if opts.nics:
2198
    nics = ParseNicOption(opts.nics)
2199
  elif opts.no_nics:
2200
    # no nics
2201
    nics = []
2202
  elif mode == constants.INSTANCE_CREATE:
2203
    # default of one nic, all auto
2204
    nics = [{}]
2205
  else:
2206
    # mode == import
2207
    nics = []
2208

    
2209
  if opts.disk_template == constants.DT_DISKLESS:
2210
    if opts.disks or opts.sd_size is not None:
2211
      raise errors.OpPrereqError("Diskless instance but disk"
2212
                                 " information passed")
2213
    disks = []
2214
  else:
2215
    if (not opts.disks and not opts.sd_size
2216
        and mode == constants.INSTANCE_CREATE):
2217
      raise errors.OpPrereqError("No disk information specified")
2218
    if opts.disks and opts.sd_size is not None:
2219
      raise errors.OpPrereqError("Please use either the '--disk' or"
2220
                                 " '-s' option")
2221
    if opts.sd_size is not None:
2222
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2223

    
2224
    if opts.disks:
2225
      try:
2226
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2227
      except ValueError, err:
2228
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2229
      disks = [{}] * disk_max
2230
    else:
2231
      disks = []
2232
    for didx, ddict in opts.disks:
2233
      didx = int(didx)
2234
      if not isinstance(ddict, dict):
2235
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2236
        raise errors.OpPrereqError(msg)
2237
      elif constants.IDISK_SIZE in ddict:
2238
        if constants.IDISK_ADOPT in ddict:
2239
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2240
                                     " (disk %d)" % didx)
2241
        try:
2242
          ddict[constants.IDISK_SIZE] = \
2243
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2244
        except ValueError, err:
2245
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2246
                                     (didx, err))
2247
      elif constants.IDISK_ADOPT in ddict:
2248
        if mode == constants.INSTANCE_IMPORT:
2249
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2250
                                     " import")
2251
        ddict[constants.IDISK_SIZE] = 0
2252
      else:
2253
        raise errors.OpPrereqError("Missing size or adoption source for"
2254
                                   " disk %d" % didx)
2255
      disks[didx] = ddict
2256

    
2257
  if opts.tags is not None:
2258
    tags = opts.tags.split(",")
2259
  else:
2260
    tags = []
2261

    
2262
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2263
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2264

    
2265
  if mode == constants.INSTANCE_CREATE:
2266
    start = opts.start
2267
    os_type = opts.os
2268
    force_variant = opts.force_variant
2269
    src_node = None
2270
    src_path = None
2271
    no_install = opts.no_install
2272
    identify_defaults = False
2273
  elif mode == constants.INSTANCE_IMPORT:
2274
    start = False
2275
    os_type = None
2276
    force_variant = False
2277
    src_node = opts.src_node
2278
    src_path = opts.src_dir
2279
    no_install = None
2280
    identify_defaults = opts.identify_defaults
2281
  else:
2282
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2283

    
2284
  op = opcodes.OpInstanceCreate(instance_name=instance,
2285
                                disks=disks,
2286
                                disk_template=opts.disk_template,
2287
                                nics=nics,
2288
                                pnode=pnode, snode=snode,
2289
                                ip_check=opts.ip_check,
2290
                                name_check=opts.name_check,
2291
                                wait_for_sync=opts.wait_for_sync,
2292
                                file_storage_dir=opts.file_storage_dir,
2293
                                file_driver=opts.file_driver,
2294
                                iallocator=opts.iallocator,
2295
                                hypervisor=hypervisor,
2296
                                hvparams=hvparams,
2297
                                beparams=opts.beparams,
2298
                                osparams=opts.osparams,
2299
                                mode=mode,
2300
                                start=start,
2301
                                os_type=os_type,
2302
                                force_variant=force_variant,
2303
                                src_node=src_node,
2304
                                src_path=src_path,
2305
                                tags=tags,
2306
                                no_install=no_install,
2307
                                identify_defaults=identify_defaults)
2308

    
2309
  SubmitOrSend(op, opts)
2310
  return 0
2311

    
2312

    
2313
class _RunWhileClusterStoppedHelper:
2314
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2315

2316
  """
2317
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2318
    """Initializes this class.
2319

2320
    @type feedback_fn: callable
2321
    @param feedback_fn: Feedback function
2322
    @type cluster_name: string
2323
    @param cluster_name: Cluster name
2324
    @type master_node: string
2325
    @param master_node: Master node name
2326
    @type online_nodes: list
2327
    @param online_nodes: List of names of online nodes
2328

2329
    """
2330
    self.feedback_fn = feedback_fn
2331
    self.cluster_name = cluster_name
2332
    self.master_node = master_node
2333
    self.online_nodes = online_nodes
2334

    
2335
    self.ssh = ssh.SshRunner(self.cluster_name)
2336

    
2337
    self.nonmaster_nodes = [name for name in online_nodes
2338
                            if name != master_node]
2339

    
2340
    assert self.master_node not in self.nonmaster_nodes
2341

    
2342
  def _RunCmd(self, node_name, cmd):
2343
    """Runs a command on the local or a remote machine.
2344

2345
    @type node_name: string
2346
    @param node_name: Machine name
2347
    @type cmd: list
2348
    @param cmd: Command
2349

2350
    """
2351
    if node_name is None or node_name == self.master_node:
2352
      # No need to use SSH
2353
      result = utils.RunCmd(cmd)
2354
    else:
2355
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2356

    
2357
    if result.failed:
2358
      errmsg = ["Failed to run command %s" % result.cmd]
2359
      if node_name:
2360
        errmsg.append("on node %s" % node_name)
2361
      errmsg.append(": exitcode %s and error %s" %
2362
                    (result.exit_code, result.output))
2363
      raise errors.OpExecError(" ".join(errmsg))
2364

    
2365
  def Call(self, fn, *args):
2366
    """Call function while all daemons are stopped.
2367

2368
    @type fn: callable
2369
    @param fn: Function to be called
2370

2371
    """
2372
    # Pause watcher by acquiring an exclusive lock on watcher state file
2373
    self.feedback_fn("Blocking watcher")
2374
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
2375
    try:
2376
      # TODO: Currently, this just blocks. There's no timeout.
2377
      # TODO: Should it be a shared lock?
2378
      watcher_block.Exclusive(blocking=True)
2379

    
2380
      # Stop master daemons, so that no new jobs can come in and all running
2381
      # ones are finished
2382
      self.feedback_fn("Stopping master daemons")
2383
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2384
      try:
2385
        # Stop daemons on all nodes
2386
        for node_name in self.online_nodes:
2387
          self.feedback_fn("Stopping daemons on %s" % node_name)
2388
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2389

    
2390
        # All daemons are shut down now
2391
        try:
2392
          return fn(self, *args)
2393
        except Exception, err:
2394
          _, errmsg = FormatError(err)
2395
          logging.exception("Caught exception")
2396
          self.feedback_fn(errmsg)
2397
          raise
2398
      finally:
2399
        # Start cluster again, master node last
2400
        for node_name in self.nonmaster_nodes + [self.master_node]:
2401
          self.feedback_fn("Starting daemons on %s" % node_name)
2402
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2403
    finally:
2404
      # Resume watcher
2405
      watcher_block.Close()
2406

    
2407

    
2408
def RunWhileClusterStopped(feedback_fn, fn, *args):
2409
  """Calls a function while all cluster daemons are stopped.
2410

2411
  @type feedback_fn: callable
2412
  @param feedback_fn: Feedback function
2413
  @type fn: callable
2414
  @param fn: Function to be called when daemons are stopped
2415

2416
  """
2417
  feedback_fn("Gathering cluster information")
2418

    
2419
  # This ensures we're running on the master daemon
2420
  cl = GetClient()
2421

    
2422
  (cluster_name, master_node) = \
2423
    cl.QueryConfigValues(["cluster_name", "master_node"])
2424

    
2425
  online_nodes = GetOnlineNodes([], cl=cl)
2426

    
2427
  # Don't keep a reference to the client. The master daemon will go away.
2428
  del cl
2429

    
2430
  assert master_node in online_nodes
2431

    
2432
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2433
                                       online_nodes).Call(fn, *args)
2434

    
2435

    
2436
def GenerateTable(headers, fields, separator, data,
2437
                  numfields=None, unitfields=None,
2438
                  units=None):
2439
  """Prints a table with headers and different fields.
2440

2441
  @type headers: dict
2442
  @param headers: dictionary mapping field names to headers for
2443
      the table
2444
  @type fields: list
2445
  @param fields: the field names corresponding to each row in
2446
      the data field
2447
  @param separator: the separator to be used; if this is None,
2448
      the default 'smart' algorithm is used which computes optimal
2449
      field width, otherwise just the separator is used between
2450
      each field
2451
  @type data: list
2452
  @param data: a list of lists, each sublist being one row to be output
2453
  @type numfields: list
2454
  @param numfields: a list with the fields that hold numeric
2455
      values and thus should be right-aligned
2456
  @type unitfields: list
2457
  @param unitfields: a list with the fields that hold numeric
2458
      values that should be formatted with the units field
2459
  @type units: string or None
2460
  @param units: the units we should use for formatting, or None for
2461
      automatic choice (human-readable for non-separator usage, otherwise
2462
      megabytes); this is a one-letter string
2463

2464
  """
2465
  if units is None:
2466
    if separator:
2467
      units = "m"
2468
    else:
2469
      units = "h"
2470

    
2471
  if numfields is None:
2472
    numfields = []
2473
  if unitfields is None:
2474
    unitfields = []
2475

    
2476
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2477
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2478

    
2479
  format_fields = []
2480
  for field in fields:
2481
    if headers and field not in headers:
2482
      # TODO: handle better unknown fields (either revert to old
2483
      # style of raising exception, or deal more intelligently with
2484
      # variable fields)
2485
      headers[field] = field
2486
    if separator is not None:
2487
      format_fields.append("%s")
2488
    elif numfields.Matches(field):
2489
      format_fields.append("%*s")
2490
    else:
2491
      format_fields.append("%-*s")
2492

    
2493
  if separator is None:
2494
    mlens = [0 for name in fields]
2495
    format_str = " ".join(format_fields)
2496
  else:
2497
    format_str = separator.replace("%", "%%").join(format_fields)
2498

    
2499
  for row in data:
2500
    if row is None:
2501
      continue
2502
    for idx, val in enumerate(row):
2503
      if unitfields.Matches(fields[idx]):
2504
        try:
2505
          val = int(val)
2506
        except (TypeError, ValueError):
2507
          pass
2508
        else:
2509
          val = row[idx] = utils.FormatUnit(val, units)
2510
      val = row[idx] = str(val)
2511
      if separator is None:
2512
        mlens[idx] = max(mlens[idx], len(val))
2513

    
2514
  result = []
2515
  if headers:
2516
    args = []
2517
    for idx, name in enumerate(fields):
2518
      hdr = headers[name]
2519
      if separator is None:
2520
        mlens[idx] = max(mlens[idx], len(hdr))
2521
        args.append(mlens[idx])
2522
      args.append(hdr)
2523
    result.append(format_str % tuple(args))
2524

    
2525
  if separator is None:
2526
    assert len(mlens) == len(fields)
2527

    
2528
    if fields and not numfields.Matches(fields[-1]):
2529
      mlens[-1] = 0
2530

    
2531
  for line in data:
2532
    args = []
2533
    if line is None:
2534
      line = ["-" for _ in fields]
2535
    for idx in range(len(fields)):
2536
      if separator is None:
2537
        args.append(mlens[idx])
2538
      args.append(line[idx])
2539
    result.append(format_str % tuple(args))
2540

    
2541
  return result
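
# Illustrative sketch (not part of the original code); headers, fields and
# data values are made up.
#
#   lines = GenerateTable(headers={"name": "Node", "dfree": "DFree"},
#                         fields=["name", "dfree"],
#                         separator=None,
#                         data=[["node1.example.com", 102400]],
#                         numfields=["dfree"], unitfields=["dfree"],
#                         units="h")
#   # => aligned rows, with "dfree" right-aligned and rendered in
#   #    human-readable units (something like "100.0G")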
2542

    
2543

    
2544
def _FormatBool(value):
2545
  """Formats a boolean value as a string.
2546

2547
  """
2548
  if value:
2549
    return "Y"
2550
  return "N"
2551

    
2552

    
2553
#: Default formatting for query results; (callback, align right)
2554
_DEFAULT_FORMAT_QUERY = {
2555
  constants.QFT_TEXT: (str, False),
2556
  constants.QFT_BOOL: (_FormatBool, False),
2557
  constants.QFT_NUMBER: (str, True),
2558
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2559
  constants.QFT_OTHER: (str, False),
2560
  constants.QFT_UNKNOWN: (str, False),
2561
  }
2562

    
2563

    
2564
def _GetColumnFormatter(fdef, override, unit):
2565
  """Returns formatting function for a field.
2566

2567
  @type fdef: L{objects.QueryFieldDefinition}
2568
  @type override: dict
2569
  @param override: Dictionary for overriding field formatting functions,
2570
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2571
  @type unit: string
2572
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2573
  @rtype: tuple; (callable, bool)
2574
  @return: Returns the function to format a value (takes one parameter) and a
2575
    boolean for aligning the value on the right-hand side
2576

2577
  """
2578
  fmt = override.get(fdef.name, None)
2579
  if fmt is not None:
2580
    return fmt
2581

    
2582
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2583

    
2584
  if fdef.kind == constants.QFT_UNIT:
2585
    # Can't keep this information in the static dictionary
2586
    return (lambda value: utils.FormatUnit(value, unit), True)
2587

    
2588
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2589
  if fmt is not None:
2590
    return fmt
2591

    
2592
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2593

    
2594

    
2595
class _QueryColumnFormatter:
2596
  """Callable class for formatting fields of a query.
2597

2598
  """
2599
  def __init__(self, fn, status_fn, verbose):
2600
    """Initializes this class.
2601

2602
    @type fn: callable
2603
    @param fn: Formatting function
2604
    @type status_fn: callable
2605
    @param status_fn: Function to report fields' status
2606
    @type verbose: boolean
2607
    @param verbose: whether to use verbose field descriptions or not
2608

2609
    """
2610
    self._fn = fn
2611
    self._status_fn = status_fn
2612
    self._verbose = verbose
2613

    
2614
  def __call__(self, data):
2615
    """Returns a field's string representation.
2616

2617
    """
2618
    (status, value) = data
2619

    
2620
    # Report status
2621
    self._status_fn(status)
2622

    
2623
    if status == constants.RS_NORMAL:
2624
      return self._fn(value)
2625

    
2626
    assert value is None, \
2627
           "Found value %r for abnormal status %s" % (value, status)
2628

    
2629
    return FormatResultError(status, self._verbose)
2630

    
2631

    
2632
def FormatResultError(status, verbose):
2633
  """Formats result status other than L{constants.RS_NORMAL}.
2634

2635
  @param status: The result status
2636
  @type verbose: boolean
2637
  @param verbose: Whether to return the verbose text
2638
  @return: Text of result status
2639

2640
  """
2641
  assert status != constants.RS_NORMAL, \
2642
         "FormatResultError called with status equal to constants.RS_NORMAL"
2643
  try:
2644
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2645
  except KeyError:
2646
    raise NotImplementedError("Unknown status %s" % status)
2647
  else:
2648
    if verbose:
2649
      return verbose_text
2650
    return normal_text
2651

    
2652

    
2653
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2654
                      header=False, verbose=False):
2655
  """Formats data in L{objects.QueryResponse}.
2656

2657
  @type result: L{objects.QueryResponse}
2658
  @param result: result of query operation
2659
  @type unit: string
2660
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2661
    see L{utils.text.FormatUnit}
2662
  @type format_override: dict
2663
  @param format_override: Dictionary for overriding field formatting functions,
2664
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2665
  @type separator: string or None
2666
  @param separator: String used to separate fields
2667
  @type header: bool
2668
  @param header: Whether to output header row
2669
  @type verbose: boolean
2670
  @param verbose: whether to use verbose field descriptions or not
2671

2672
  """
2673
  if unit is None:
2674
    if separator:
2675
      unit = "m"
2676
    else:
2677
      unit = "h"
2678

    
2679
  if format_override is None:
2680
    format_override = {}
2681

    
2682
  stats = dict.fromkeys(constants.RS_ALL, 0)
2683

    
2684
  def _RecordStatus(status):
2685
    if status in stats:
2686
      stats[status] += 1
2687

    
2688
  columns = []
2689
  for fdef in result.fields:
2690
    assert fdef.title and fdef.name
2691
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2692
    columns.append(TableColumn(fdef.title,
2693
                               _QueryColumnFormatter(fn, _RecordStatus,
2694
                                                     verbose),
2695
                               align_right))
2696

    
2697
  table = FormatTable(result.data, columns, header, separator)
2698

    
2699
  # Collect statistics
2700
  assert len(stats) == len(constants.RS_ALL)
2701
  assert compat.all(count >= 0 for count in stats.values())
2702

    
2703
  # Determine overall status. If there was no data, unknown fields must be
2704
  # detected via the field definitions.
2705
  if (stats[constants.RS_UNKNOWN] or
2706
      (not result.data and _GetUnknownFields(result.fields))):
2707
    status = QR_UNKNOWN
2708
  elif compat.any(count > 0 for key, count in stats.items()
2709
                  if key != constants.RS_NORMAL):
2710
    status = QR_INCOMPLETE
2711
  else:
2712
    status = QR_NORMAL
2713

    
2714
  return (status, table)
2715

    
2716

    
2717
def _GetUnknownFields(fdefs):
2718
  """Returns list of unknown fields included in C{fdefs}.
2719

2720
  @type fdefs: list of L{objects.QueryFieldDefinition}
2721

2722
  """
2723
  return [fdef for fdef in fdefs
2724
          if fdef.kind == constants.QFT_UNKNOWN]
2725

    
2726

    
2727
def _WarnUnknownFields(fdefs):
2728
  """Prints a warning to stderr if a query included unknown fields.
2729

2730
  @type fdefs: list of L{objects.QueryFieldDefinition}
2731

2732
  """
2733
  unknown = _GetUnknownFields(fdefs)
2734
  if unknown:
2735
    ToStderr("Warning: Queried for unknown fields %s",
2736
             utils.CommaJoin(fdef.name for fdef in unknown))
2737
    return True
2738

    
2739
  return False
2740

    
2741

    
2742
def GenericList(resource, fields, names, unit, separator, header, cl=None,
2743
                format_override=None, verbose=False, force_filter=False):
2744
  """Generic implementation for listing all items of a resource.
2745

2746
  @param resource: One of L{constants.QR_VIA_LUXI}
2747
  @type fields: list of strings
2748
  @param fields: List of fields to query for
2749
  @type names: list of strings
2750
  @param names: Names of items to query for
2751
  @type unit: string or None
2752
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2753
    None for automatic choice (human-readable for non-separator usage,
2754
    otherwise megabytes); this is a one-letter string
2755
  @type separator: string or None
2756
  @param separator: String used to separate fields
2757
  @type header: bool
2758
  @param header: Whether to show header row
2759
  @type force_filter: bool
2760
  @param force_filter: Whether to always treat names as filter
2761
  @type format_override: dict
2762
  @param format_override: Dictionary for overriding field formatting functions,
2763
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2764
  @type verbose: boolean
2765
  @param verbose: whether to use verbose field descriptions or not
2766

2767
  """
2768
  if not names:
2769
    names = None
2770

    
2771
  qfilter = qlang.MakeFilter(names, force_filter)
2772

    
2773
  if cl is None:
2774
    cl = GetClient()
2775

    
2776
  response = cl.Query(resource, fields, qfilter)
2777

    
2778
  found_unknown = _WarnUnknownFields(response.fields)
2779

    
2780
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2781
                                     header=header,
2782
                                     format_override=format_override,
2783
                                     verbose=verbose)
2784

    
2785
  for line in data:
2786
    ToStdout(line)
2787

    
2788
  assert ((found_unknown and status == QR_UNKNOWN) or
2789
          (not found_unknown and status != QR_UNKNOWN))
2790

    
2791
  if status == QR_UNKNOWN:
2792
    return constants.EXIT_UNKNOWN_FIELD
2793

    
2794
  # TODO: Should the list command fail if not all data could be collected?
2795
  return constants.EXIT_SUCCESS
2796

    
2797

    
2798
def GenericListFields(resource, fields, separator, header, cl=None):
2799
  """Generic implementation for listing fields for a resource.
2800

2801
  @param resource: One of L{constants.QR_VIA_LUXI}
2802
  @type fields: list of strings
2803
  @param fields: List of fields to query for
2804
  @type separator: string or None
2805
  @param separator: String used to separate fields
2806
  @type header: bool
2807
  @param header: Whether to show header row
2808

2809
  """
2810
  if cl is None:
2811
    cl = GetClient()
2812

    
2813
  if not fields:
2814
    fields = None
2815

    
2816
  response = cl.QueryFields(resource, fields)
2817

    
2818
  found_unknown = _WarnUnknownFields(response.fields)
2819

    
2820
  columns = [
2821
    TableColumn("Name", str, False),
2822
    TableColumn("Title", str, False),
2823
    TableColumn("Description", str, False),
2824
    ]
2825

    
2826
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2827

    
2828
  for line in FormatTable(rows, columns, header, separator):
2829
    ToStdout(line)
2830

    
2831
  if found_unknown:
2832
    return constants.EXIT_UNKNOWN_FIELD
2833

    
2834
  return constants.EXIT_SUCCESS
2835

    
2836

    
2837
class TableColumn:
2838
  """Describes a column for L{FormatTable}.
2839

2840
  """
2841
  def __init__(self, title, fn, align_right):
2842
    """Initializes this class.
2843

2844
    @type title: string
2845
    @param title: Column title
2846
    @type fn: callable
2847
    @param fn: Formatting function
2848
    @type align_right: bool
2849
    @param align_right: Whether to align values on the right-hand side
2850

2851
    """
2852
    self.title = title
2853
    self.format = fn
2854
    self.align_right = align_right
2855

    
2856

    
2857
def _GetColFormatString(width, align_right):
2858
  """Returns the format string for a field.
2859

2860
  """
2861
  if align_right:
2862
    sign = ""
2863
  else:
2864
    sign = "-"
2865

    
2866
  return "%%%s%ss" % (sign, width)
2867

    
2868

    
2869
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
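
# Illustrative example (not part of the original module); the column and row
# values are made up, only the formatting follows the code above:
#
#   columns = [TableColumn("Name", str, False), TableColumn("Size", str, True)]
#   rows = [["disk0", "128"], ["disk1", "4096"]]
#   FormatTable(rows, columns, True, None)
#     -> ["Name  Size", "disk0  128", "disk1 4096"]
#   FormatTable(rows, columns, True, "|")
#     -> ["Name|Size", "disk0|128", "disk1|4096"]

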
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
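
# Illustrative example (not part of the original module): a well-formed
# timestamp such as (1234567890, 123456) is rendered in local time as
# "YYYY-MM-DD HH:MM:SS" plus the microseconds, e.g.
# "2009-02-13 23:31:30.123456" on a machine running in UTC; anything that is
# not a two-element tuple/list yields "?".

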
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
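
# Illustrative examples (not part of the original module), derived from the
# suffix_map above:
#
#   ParseTimespec("30")  -> 30        (no suffix: plain seconds)
#   ParseTimespec("2m")  -> 120
#   ParseTimespec("1h")  -> 3600
#   ParseTimespec("2d")  -> 172800
#   ParseTimespec("1w")  -> 604800
#   ParseTimespec("") and ParseTimespec("h") raise errors.OpPrereqError

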
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
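
# Illustrative example (not part of the original module): for a hypothetical
# call GetOnlineNodes([], nodegroup="group1", filter_master=True), the filter
# built above is
#
#   [qlang.OP_AND,
#    [qlang.OP_OR, [qlang.OP_EQUAL, "group", "group1"],
#                  [qlang.OP_EQUAL, "group.uuid", "group1"]],
#    [qlang.OP_NOT, [qlang.OP_TRUE, "master"]]]
#
# i.e. nodes in that group excluding the master; offline nodes are then
# dropped by partitioning on the "offline" field.

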
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
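
# Illustrative usage (not part of the original module); "op1" and "op2" stand
# for opcode objects built elsewhere and are purely hypothetical:
#
#   jex = JobExecutor(opts=opts)
#   jex.QueueJob("instance1.example.com", op1)
#   jex.QueueJob("instance2.example.com", op2)
#   results = jex.GetResults()   # [(success, result_or_error), ...]
#   # or, to submit and only print the job IDs without waiting:
#   # jex.WaitOrShow(False)

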
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the explicitly set ("own") parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
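
# Illustrative example (not part of the original module), using made-up
# parameter names:
#
#   buf = StringIO()
#   FormatParameterDict(buf, {"memory": 512}, {"memory": 512, "vcpus": 1})
#   buf.getvalue() == "  - memory: 512\n  - vcpus: default (1)\n"

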
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of items.

  This function is used to request confirmation for performing an
  operation on a given list of items (described by C{list_type}).

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
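
# Illustrative example (not part of the original module): a hypothetical call
# ConfirmOperation(["node1", "node2"], "nodes", "shutdown") asks the user
# (via AskUser):
#
#   The shutdown will operate on 2 nodes.
#   Do you want to continue?
#   Affected nodes:
#     node1
#     node2
#
# and returns True only if "y" is chosen; with more than 20 names the list is
# hidden behind an extra "v" (view) choice, as implemented above.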