#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
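# Illustrative sketch (not taken from a specific Ganeti client): a command
# expecting exactly one instance name followed by any number of node names
# could declare its positional arguments as
#   [ArgInstance(min=1, max=1), ArgNode()]
# where min/max bound how many arguments of that kind are accepted.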


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, kind
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOpCode(op, opts=opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
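# Illustrative example (assumed behaviour of utils.ParseUnit): a value such as
# "4g" is expected to be converted to 4096, i.e. the size in mebibytes, while
# malformed values surface to optparse as OptionValueError via the handler
# above.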


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
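# Illustrative example (option name and values made up for this sketch):
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus")
# would return {"memory": "512", "auto_balance": False, "vcpus": None};
# plain values stay strings, "no_"-prefixed keys map to False and
# "-"-prefixed keys map to None, as described in the docstring above.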


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
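# Illustrative example (values made up for this sketch): a value like
# "xen-pvm:use_bootloader=no,no_pae" parses to
# ("xen-pvm", {"use_bootloader": "no", "pae": False}), while a bare "no_drbd"
# collapses to ("drbd", False) and "-plain" to ("plain", None), signalling the
# removal/reset of a whole parameter group as per the error message above.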


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                            metavar="<NAME>",
                            help="Set the default instance allocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                         type="keyval", default={},
                         help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                 type="keyval", default={},
                                 help="Memory count specs: min, max, std"
                                 " (in MB)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: min, max, std (in MB)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: min, max, std")

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance MAC"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                dest="use_external_mip_script",
                                help="Specify whether to run a user-provided"
                                " script for the master IP address turnup and"
                                " turndown operations",
                                type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
                            dest="shared_file_storage_dir",
                            help="Specify the default directory (cluster-"
                            "wide) for storing the shared file-based"
                            " disks [%s]" %
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
                            metavar="SHAREDDIR",
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                         dest="shutdown_timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait for instance shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                           default=None,
                           help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                           default=None,
                           help="File containing the certificate of the CA"
                                " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                               dest="new_spice_cert", default=None,
                               action="store_true",
                               help=("Generate a new self-signed SPICE"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                             action="store", dest="reserved_lvs",
                             help=("A comma-separated list of reserved"
                                   " logical volume names, that will be"
                                   " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                         default=constants.OOB_TIMEOUT,
                         help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the format"
                                  " storage_type/identifier:option=value,..."),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,..."),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
1369

    
1370

    
1371
def _ParseArgs(argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of env variables allowed for default args

  """
  assert not (env_override - set(commands))

  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 1, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[1:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = _PRIONAME_TO_VALUE[options.priority]


def GetClient():
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")


def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict))

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
2749
                format_override=None, verbose=False, force_filter=False):
2750
  """Generic implementation for listing all items of a resource.
2751

2752
  @param resource: One of L{constants.QR_VIA_LUXI}
2753
  @type fields: list of strings
2754
  @param fields: List of fields to query for
2755
  @type names: list of strings
2756
  @param names: Names of items to query for
2757
  @type unit: string or None
2758
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2759
    None for automatic choice (human-readable for non-separator usage,
2760
    otherwise megabytes); this is a one-letter string
2761
  @type separator: string or None
2762
  @param separator: String used to separate fields
2763
  @type header: bool
2764
  @param header: Whether to show header row
2765
  @type force_filter: bool
2766
  @param force_filter: Whether to always treat names as filter
2767
  @type format_override: dict
2768
  @param format_override: Dictionary for overriding field formatting functions,
2769
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2770
  @type verbose: boolean
2771
  @param verbose: whether to use verbose field descriptions or not
2772

2773
  """
2774
  if not names:
2775
    names = None
2776

    
2777
  qfilter = qlang.MakeFilter(names, force_filter)
2778

    
2779
  if cl is None:
2780
    cl = GetClient()
2781

    
2782
  response = cl.Query(resource, fields, qfilter)
2783

    
2784
  found_unknown = _WarnUnknownFields(response.fields)
2785

    
2786
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2787
                                     header=header,
2788
                                     format_override=format_override,
2789
                                     verbose=verbose)
2790

    
2791
  for line in data:
2792
    ToStdout(line)
2793

    
2794
  assert ((found_unknown and status == QR_UNKNOWN) or
2795
          (not found_unknown and status != QR_UNKNOWN))
2796

    
2797
  if status == QR_UNKNOWN:
2798
    return constants.EXIT_UNKNOWN_FIELD
2799

    
2800
  # TODO: Should the list command fail if not all data could be collected?
2801
  return constants.EXIT_SUCCESS
2802

    
2803

    
2804
def GenericListFields(resource, fields, separator, header, cl=None):
2805
  """Generic implementation for listing fields for a resource.
2806

2807
  @param resource: One of L{constants.QR_VIA_LUXI}
2808
  @type fields: list of strings
2809
  @param fields: List of fields to query for
2810
  @type separator: string or None
2811
  @param separator: String used to separate fields
2812
  @type header: bool
2813
  @param header: Whether to show header row
2814

2815
  """
2816
  if cl is None:
2817
    cl = GetClient()
2818

    
2819
  if not fields:
2820
    fields = None
2821

    
2822
  response = cl.QueryFields(resource, fields)
2823

    
2824
  found_unknown = _WarnUnknownFields(response.fields)
2825

    
2826
  columns = [
2827
    TableColumn("Name", str, False),
2828
    TableColumn("Title", str, False),
2829
    TableColumn("Description", str, False),
2830
    ]
2831

    
2832
  rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields]
2833

    
2834
  for line in FormatTable(rows, columns, header, separator):
2835
    ToStdout(line)
2836

    
2837
  if found_unknown:
2838
    return constants.EXIT_UNKNOWN_FIELD
2839

    
2840
  return constants.EXIT_SUCCESS
2841

    
2842

    
2843
class TableColumn:
2844
  """Describes a column for L{FormatTable}.
2845

2846
  """
2847
  def __init__(self, title, fn, align_right):
2848
    """Initializes this class.
2849

2850
    @type title: string
2851
    @param title: Column title
2852
    @type fn: callable
2853
    @param fn: Formatting function
2854
    @type align_right: bool
2855
    @param align_right: Whether to align values on the right-hand side
2856

2857
    """
2858
    self.title = title
2859
    self.format = fn
2860
    self.align_right = align_right
2861

    
2862

    
2863
def _GetColFormatString(width, align_right):
2864
  """Returns the format string for a field.
2865

2866
  """
2867
  if align_right:
2868
    sign = ""
2869
  else:
2870
    sign = "-"
2871

    
2872
  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
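
# Usage sketch (illustrative only; the row data is made up): rendering a small
# table with L{TableColumn} and L{FormatTable}.  With separator=None columns
# are padded to their widest value; with a separator string no padding is done.
#
#   columns = [TableColumn("Node", str, False),
#              TableColumn("Jobs", str, True)]
#   rows = [["node1.example.com", "3"], ["node2.example.com", "12"]]
#   for line in FormatTable(rows, columns, True, None):
#     ToStdout(line)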


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"
  sec, usec = ts
  return time.strftime("%F %T", time.localtime(sec)) + ".%06d" % usec
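
# Illustrative example (not part of the original module): the argument is a
# (seconds, microseconds) pair and is rendered in the local timezone, so the
# exact output depends on the machine's TZ setting.
#
#   FormatTimestamp((1325419200, 250000))  # e.g. "2012-01-01 12:00:00.250000"
#   FormatTimestamp("not a timestamp")     # -> "?"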


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed")
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value)
  return value
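
# Illustrative examples (not part of the original module): the result is
# always a number of seconds.
#
#   ParseTimespec("30")   # -> 30
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("1w")   # -> 604800
#   ParseTimespec("h")    # raises errors.OpPrereqError (suffix only)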


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that have been skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
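
# Usage sketch (illustrative only; the group name is hypothetical): collect
# the names of all online nodes in one node group, excluding the master node;
# a note about any skipped offline nodes is printed on stderr.
#
#   node_names = GetOnlineNodes([], nodegroup="group1", filter_master=True)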


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


3109
class JobExecutor(object):
3110
  """Class which manages the submission and execution of multiple jobs.
3111

3112
  Note that instances of this class should not be reused between
3113
  GetResults() calls.
3114

3115
  """
3116
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3117
    self.queue = []
3118
    if cl is None:
3119
      cl = GetClient()
3120
    self.cl = cl
3121
    self.verbose = verbose
3122
    self.jobs = []
3123
    self.opts = opts
3124
    self.feedback_fn = feedback_fn
3125
    self._counter = itertools.count()
3126

    
3127
  @staticmethod
3128
  def _IfName(name, fmt):
3129
    """Helper function for formatting name.
3130

3131
    """
3132
    if name:
3133
      return fmt % name
3134

    
3135
    return ""
3136

    
3137
  def QueueJob(self, name, *ops):
3138
    """Record a job for later submit.
3139

3140
    @type name: string
3141
    @param name: a description of the job, will be used in WaitJobSet
3142

3143
    """
3144
    SetGenericOpcodeOpts(ops, self.opts)
3145
    self.queue.append((self._counter.next(), name, ops))
3146

    
3147
  def AddJobId(self, name, status, job_id):
3148
    """Adds a job ID to the internal queue.
3149

3150
    """
3151
    self.jobs.append((self._counter.next(), status, job_id, name))
3152

    
3153
  def SubmitPending(self, each=False):
3154
    """Submit all pending jobs.
3155

3156
    """
3157
    if each:
3158
      results = []
3159
      for (_, _, ops) in self.queue:
3160
        # SubmitJob will remove the success status, but raise an exception if
3161
        # the submission fails, so we'll notice that anyway.
3162
        results.append([True, self.cl.SubmitJob(ops)[0]])
3163
    else:
3164
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3165
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3166
      self.jobs.append((idx, status, data, name))
3167

    
3168
  def _ChooseJob(self):
3169
    """Choose a non-waiting/queued job to poll next.
3170

3171
    """
3172
    assert self.jobs, "_ChooseJob called with empty job list"
3173

    
3174
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3175
                               ["status"])
3176
    assert result
3177

    
3178
    for job_data, status in zip(self.jobs, result):
3179
      if (isinstance(status, list) and status and
3180
          status[0] in (constants.JOB_STATUS_QUEUED,
3181
                        constants.JOB_STATUS_WAITING,
3182
                        constants.JOB_STATUS_CANCELING)):
3183
        # job is still present and waiting
3184
        continue
3185
      # good candidate found (either running job or lost job)
3186
      self.jobs.remove(job_data)
3187
      return job_data
3188

    
3189
    # no job found
3190
    return self.jobs.pop(0)
3191

    
3192
  def GetResults(self):
3193
    """Wait for and return the results of all jobs.
3194

3195
    @rtype: list
3196
    @return: list of tuples (success, job results), in the same order
3197
        as the submitted jobs; if a job has failed, instead of the result
3198
        there will be the error message
3199

3200
    """
3201
    if not self.jobs:
3202
      self.SubmitPending()
3203
    results = []
3204
    if self.verbose:
3205
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3206
      if ok_jobs:
3207
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3208

    
3209
    # first, remove any non-submitted jobs
3210
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3211
    for idx, _, jid, name in failures:
3212
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3213
      results.append((idx, False, jid))
3214

    
3215
    while self.jobs:
3216
      (idx, _, jid, name) = self._ChooseJob()
3217
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3218
      try:
3219
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3220
        success = True
3221
      except errors.JobLost, err:
3222
        _, job_result = FormatError(err)
3223
        ToStderr("Job %s%s has been archived, cannot check its result",
3224
                 jid, self._IfName(name, " for %s"))
3225
        success = False
3226
      except (errors.GenericError, luxi.ProtocolError), err:
3227
        _, job_result = FormatError(err)
3228
        success = False
3229
        # the error message will always be shown, verbose or not
3230
        ToStderr("Job %s%s has failed: %s",
3231
                 jid, self._IfName(name, " for %s"), job_result)
3232

    
3233
      results.append((idx, success, job_result))
3234

    
3235
    # sort based on the index, then drop it
3236
    results.sort()
3237
    results = [i[1:] for i in results]
3238

    
3239
    return results
3240

    
3241
  def WaitOrShow(self, wait):
3242
    """Wait for job results or only print the job IDs.
3243

3244
    @type wait: boolean
3245
    @param wait: whether to wait or not
3246

3247
    """
3248
    if wait:
3249
      return self.GetResults()
3250
    else:
3251
      if not self.jobs:
3252
        self.SubmitPending()
3253
      for _, status, result, name in self.jobs:
3254
        if status:
3255
          ToStdout("%s: %s", result, name)
3256
        else:
3257
          ToStderr("Failure for %s: %s", name, result)
3258
      return [row[1:3] for row in self.jobs]
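
# Usage sketch (illustrative only): queueing several jobs and waiting for all
# of them.  "opts" stands for the parsed command-line options and "op1"/"op2"
# for previously built opcode objects; both are assumed to exist in the
# calling code.
#
#   jex = JobExecutor(opts=opts)
#   jex.QueueJob("first job", op1)
#   jex.QueueJob("second job", op2)
#   for success, result in jex.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)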


def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the explicitly-set (own) parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level
  for key in sorted(actual):
    val = param_dict.get(key, "default (%s)" % actual[key])
    buf.write("%s- %s: %s\n" % (indent, key, val))
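
# Illustrative example (not part of the original module): only "vcpus" is set
# explicitly, so the remaining key is shown with its default value.
#
#   buf = StringIO()
#   FormatParameterDict(buf, {"vcpus": 2}, {"vcpus": 2, "memory": 128})
#   buf.getvalue() == "  - memory: default (128)\n  - vcpus: 2\n"  # -> True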


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
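
# Usage sketch (illustrative only; the instance names are made up), as it
# could appear inside a command handler:
#
#   if not ConfirmOperation(["inst1", "inst2"], "instances", "shutdown"):
#     return constants.EXIT_FAILURE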