#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
import ganeti.rpc.errors as rpcerr
import ganeti.rpc.node as rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from ganeti.runtime import (GetClient)

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_GLUSTER_FILEDIR_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HOTPLUG_IF_POSSIBLE_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_PARAMS_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "INSTANCE_COMMUNICATION_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_NODE_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RQL_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "COMPRESS_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "FixHvParams",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
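
# Illustrative note, not part of the original module: for a tag command
# invoked with opts.tag_type == constants.TAG_NODE and
# args == ["node1.example.com", "mytag"] (example values only),
# _ExtractTagsObject(opts, args) would return
# (constants.TAG_NODE, "node1.example.com") and leave args as ["mytag"].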


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  supported kinds of tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type field
  denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  supported kinds of tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type field
  denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  supported kinds of tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type field
  denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """optparse custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
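
# Illustrative note (an assumption about utils.ParseUnit, not stated in this
# file): plain numbers are taken as MiB and suffixed values are converted,
# so check_unit(None, "--os-size", "4g") would return 4096 under ParseUnit's
# usual MiB-based semantics, while an unparsable value raises
# OptionValueError.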


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
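
# Illustrative sketch of the prefix rules above (key names chosen for
# demonstration only): with parse_prefixes=True,
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-kernel_path", True)
# would yield {"memory": "512", "auto_balance": False, "kernel_path": None},
# i.e. "no_"-prefixed keys become False, "-"-prefixed keys become None and
# plain "key=val" pairs keep their string value.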


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
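
# Illustrative sketch (identifier and key names are examples only): given
# parse_prefixes=True,
#   _SplitIdentKeyVal("-H", "kvm:kernel_path=/vmlinuz,no_acpi", True)
# would return ("kvm", {"kernel_path": "/vmlinuz", "acpi": False}), while a
# bare "no_kvm" would return ("kvm", False), marking the whole parameter
# group for removal.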


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
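
# Illustrative sketch (identifiers and keys are examples only): a value such
# as "disk:size=1024/nic:mode=bridged//disk:size=2048" would parse to
#   [{"disk": {"size": "1024"}, "nic": {"mode": "bridged"}},
#    {"disk": {"size": "2048"}}],
# i.e. "//" separates list entries and "/" separates ident:key=val sections
# within an entry.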


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params",
                                           dest="default_iallocator_params",
                                           help="iallocator template"
                                           " parameters, in the format"
                                           " template:option=value,"
                                           " option=value,...",
                                           type="keyval",
                                           default={})

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration/failover)")
1123

    
1124
STATIC_OPT = cli_option("-s", "--static", dest="static",
1125
                        action="store_true", default=False,
1126
                        help="Only show configuration data, not runtime data")
1127

    
1128
ALL_OPT = cli_option("--all", dest="show_all",
1129
                     default=False, action="store_true",
1130
                     help="Show info on all instances on the cluster."
1131
                     " This can take a long time to run, use wisely")
1132

    
1133
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1134
                           action="store_true", default=False,
1135
                           help="Interactive OS reinstall, lists available"
1136
                           " OS templates for selection")
1137

    
1138
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1139
                                 action="store_true", default=False,
1140
                                 help="Remove the instance from the cluster"
1141
                                 " configuration even if there are failures"
1142
                                 " during the removal process")
1143

    
1144
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1145
                                        dest="ignore_remove_failures",
1146
                                        action="store_true", default=False,
1147
                                        help="Remove the instance from the"
1148
                                        " cluster configuration even if there"
1149
                                        " are failures during the removal"
1150
                                        " process")
1151

    
1152
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1153
                                 action="store_true", default=False,
1154
                                 help="Remove the instance from the cluster")
1155

    
1156
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1157
                               help="Specifies the new node for the instance",
1158
                               metavar="NODE", default=None,
1159
                               completion_suggest=OPT_COMPL_ONE_NODE)
1160

    
1161
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1162
                               help="Specifies the new secondary node",
1163
                               metavar="NODE", default=None,
1164
                               completion_suggest=OPT_COMPL_ONE_NODE)
1165

    
1166
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1167
                             help="Specifies the new primary node",
1168
                             metavar="<node>", default=None,
1169
                             completion_suggest=OPT_COMPL_ONE_NODE)
1170

    
1171
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1172
                            default=False, action="store_true",
1173
                            help="Replace the disk(s) on the primary"
1174
                                 " node (applies only to internally mirrored"
1175
                                 " disk templates, e.g. %s)" %
1176
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1177

    
1178
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1179
                              default=False, action="store_true",
1180
                              help="Replace the disk(s) on the secondary"
1181
                                   " node (applies only to internally mirrored"
1182
                                   " disk templates, e.g. %s)" %
1183
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1184

    
1185
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1186
                              default=False, action="store_true",
1187
                              help="Lock all nodes and auto-promote as needed"
1188
                              " to MC status")
1189

    
1190
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1191
                              default=False, action="store_true",
1192
                              help="Automatically replace faulty disks"
1193
                                   " (applies only to internally mirrored"
1194
                                   " disk templates, e.g. %s)" %
1195
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1196

    
1197
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1198
                             default=False, action="store_true",
1199
                             help="Ignore current recorded size"
1200
                             " (useful for forcing activation when"
1201
                             " the recorded size is wrong)")
1202

    
1203
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1204
                          metavar="<node>",
1205
                          completion_suggest=OPT_COMPL_ONE_NODE)
1206

    
1207
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1208
                         metavar="<dir>")
1209

    
1210
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1211
                              help="Specify the secondary ip for the node",
1212
                              metavar="ADDRESS", default=None)
1213

    
1214
READD_OPT = cli_option("--readd", dest="readd",
1215
                       default=False, action="store_true",
1216
                       help="Readd old node after replacing it")
1217

    
1218
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1219
                                default=True, action="store_false",
1220
                                help="Disable SSH key fingerprint checking")
1221

    
1222
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1223
                                 default=False, action="store_true",
1224
                                 help="Force the joining of a node")
1225

    
1226
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1227
                    type="bool", default=None, metavar=_YORNO,
1228
                    help="Set the master_candidate flag on the node")
1229

    
1230
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1231
                         type="bool", default=None,
1232
                         help=("Set the offline flag on the node"
1233
                               " (cluster does not communicate with offline"
1234
                               " nodes)"))
1235

    
1236
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1237
                         type="bool", default=None,
1238
                         help=("Set the drained flag on the node"
1239
                               " (excluded from allocation operations)"))
1240

    
1241
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1242
                              type="bool", default=None, metavar=_YORNO,
1243
                              help="Set the master_capable flag on the node")
1244

    
1245
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1246
                          type="bool", default=None, metavar=_YORNO,
1247
                          help="Set the vm_capable flag on the node")
1248

    
1249
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1250
                             type="bool", default=None, metavar=_YORNO,
1251
                             help="Set the allocatable flag on a volume")
1252

    
1253
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1254
                            dest="enabled_hypervisors",
1255
                            help="Comma-separated list of hypervisors",
1256
                            type="string", default=None)
1257

    
1258
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1259
                                        dest="enabled_disk_templates",
1260
                                        help="Comma-separated list of "
1261
                                             "disk templates",
1262
                                        type="string", default=None)
1263

    
1264
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1265
                            type="keyval", default={},
1266
                            help="NIC parameters")
1267

    
1268
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1269
                         dest="candidate_pool_size", type="int",
1270
                         help="Set the candidate pool size")
1271

    
1272
RQL_OPT = cli_option("--max-running-jobs", dest="max_running_jobs",
1273
                     type="int", help="Set the maximal number of jobs to "
1274
                                      "run simultaneously")
1275

    
1276
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1277
                         help=("Enables LVM and specifies the volume group"
1278
                               " name (cluster-wide) for disk allocation"
1279
                               " [%s]" % constants.DEFAULT_VG),
1280
                         metavar="VG", default=None)
1281

    
1282
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1283
                          help="Destroy cluster", action="store_true")
1284

    
1285
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1286
                          help="Skip node agreement check (dangerous)",
1287
                          action="store_true", default=False)
1288

    
1289
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1290
                            help="Specify the mac prefix for the instance IP"
1291
                            " addresses, in the format XX:XX:XX",
1292
                            metavar="PREFIX",
1293
                            default=None)
1294

    
1295
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1296
                               help="Specify the node interface (cluster-wide)"
1297
                               " on which the master IP address will be added"
1298
                               " (cluster init default: %s)" %
1299
                               constants.DEFAULT_BRIDGE,
1300
                               metavar="NETDEV",
1301
                               default=None)
1302

    
1303
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1304
                                help="Specify the netmask of the master IP",
1305
                                metavar="NETMASK",
1306
                                default=None)
1307

    
1308
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1309
                                     dest="use_external_mip_script",
1310
                                     help="Specify whether to run a"
1311
                                     " user-provided script for the master"
1312
                                     " IP address turnup and"
1313
                                     " turndown operations",
1314
                                     type="bool", metavar=_YORNO, default=None)
1315

    
1316
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1317
                                help="Specify the default directory (cluster-"
1318
                                "wide) for storing the file-based disks [%s]" %
1319
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1320
                                metavar="DIR",
1321
                                default=None)
1322

    
1323
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1324
  "--shared-file-storage-dir",
1325
  dest="shared_file_storage_dir",
1326
  help="Specify the default directory (cluster-wide) for storing the"
1327
  " shared file-based disks [%s]" %
1328
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1329
  metavar="SHAREDDIR", default=None)
1330

    
1331
GLOBAL_GLUSTER_FILEDIR_OPT = cli_option(
1332
  "--gluster-storage-dir",
1333
  dest="gluster_storage_dir",
1334
  help="Specify the default directory (cluster-wide) for mounting Gluster"
1335
  " file systems [%s]" %
1336
  pathutils.DEFAULT_GLUSTER_STORAGE_DIR,
1337
  metavar="GLUSTERDIR",
1338
  default=pathutils.DEFAULT_GLUSTER_STORAGE_DIR)
1339

    
1340
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1341
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1342
                                   action="store_false", default=True)
1343

    
1344
MODIFY_ETCHOSTS_OPT = \
1345
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
1346
            default=None, type="bool",
1347
            help="Defines whether the cluster should autonomously modify"
1348
            " and keep in sync the /etc/hosts file of the nodes")
1349

    
1350
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1351
                                    help="Don't initialize SSH keys",
1352
                                    action="store_false", default=True)
1353

    
1354
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1355
                             help="Enable parseable error messages",
1356
                             action="store_true", default=False)
1357

    
1358
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1359
                          help="Skip N+1 memory redundancy tests",
1360
                          action="store_true", default=False)
1361

    
1362
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1363
                             help="Type of reboot: soft/hard/full",
1364
                             default=constants.INSTANCE_REBOOT_HARD,
1365
                             metavar="<REBOOT>",
1366
                             choices=list(constants.REBOOT_TYPES))
1367

    
1368
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1369
                                    dest="ignore_secondaries",
1370
                                    default=False, action="store_true",
1371
                                    help="Ignore errors from secondaries")
1372

    
1373
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1374
                            action="store_false", default=True,
1375
                            help="Don't shutdown the instance (unsafe)")
1376

    
1377
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1378
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1379
                         help="Maximum time to wait")
1380

    
1381
COMPRESS_OPT = cli_option("--compress", dest="compress",
1382
                          default=constants.IEC_NONE,
1383
                          help="The compression mode to use",
1384
                          choices=list(constants.IEC_ALL))
1385

    
1386
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1387
                                  dest="shutdown_timeout", type="int",
1388
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1389
                                  help="Maximum time to wait for instance"
1390
                                  " shutdown")
1391

    
1392
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

NEW_NODE_CERT_OPT = cli_option(
  "--new-node-certificates", dest="new_node_cert", default=False,
  action="store_true", help="Generate new node certificates (for all nodes)")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")

HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
                                     dest="hotplug_if_possible",
                                     action="store_true", default=False,
                                     help="Hotplug devices in case"
                                          " hotplug is supported")

INSTANCE_COMMUNICATION_OPT = \
    cli_option("-c", "--communication",
               default=False,
               dest="instance_communication",
               help=constants.INSTANCE_COMMUNICATION_DOC,
               type="bool")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

SUBMIT_OPTS = [
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  ]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]


class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error


class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  """


def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s   %s" % (mlen, "", line)

  yield ""


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
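
# Editor's note (illustrative only, not part of the original module): the
# primary/secondary pair given to --node is split on the first colon; the
# node names below are hypothetical placeholders.
#   SplitNodeOption("node1.example.com:node2.example.com")
#       => ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#       => ("node1.example.com", None)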


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
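
# Editor's note (illustrative only, not part of the original module); the OS
# and variant names below are hypothetical placeholders.
#   CalculateOSNames("debootstrap", ["buster", "bullseye"])
#       => ["debootstrap+buster", "debootstrap+bullseye"]
#   CalculateOSNames("debootstrap", None)
#       => ["debootstrap"]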


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
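
# Editor's note (illustrative only, not part of the original module); the
# field names below are hypothetical placeholders.
#   ParseFields(None, ["name", "status"])
#       => ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"])
#       => ["name", "status", "oper_ram"]
#   ParseFields("name,pnode", ["name", "status"])
#       => ["name", "pnode"]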


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
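
# Editor's note (illustrative only, not part of the original module): a custom
# three-way prompt could be built with a hypothetical choices list such as
#   AskUser("Destroy the instance?",
#           choices=[("y", True, "Destroy it"),
#                    ("n", False, "Keep it"),
#                    ("a", "abort", "Abort the whole operation")])
# which returns True, False or "abort"; without a controlling tty the last
# entry's return value ("abort") is returned unchanged.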


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  if hasattr(opts, "print_jobid") and opts.print_jobid:
    ToStdout("%d" % job_id)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
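
# Editor's note (illustrative only, not part of the original module): a
# typical client command builds an opcode and blocks on its result, e.g.
#   op = opcodes.OpClusterVerifyConfig()   # hypothetical opcode choice
#   result = SubmitOpCode(op, opts=opts)
# where "opts" are the parsed command line options of the calling command.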


def SubmitOpCodeToDrainedQueue(op):
  """Forcefully insert a job in the queue, even if it is drained.

  """
  cl = GetClient()
  job_id = cl.SubmitJobToDrainedQueue([op])
  op_results = PollJob(job_id, cl=cl)
  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    if opts.print_jobid:
      ToStdout("%d" % job_id)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, rpcerr.NoMasterError):
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, rpcerr.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, rpcerr.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, rpcerr.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")


def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, rpcerr.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
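
# Editor's note (illustrative only, not part of the original module): the
# option value is a list of (index, settings-dict) pairs; the NIC parameters
# below are hypothetical placeholders.
#   ParseNicOption([("0", {"link": "br0"}), ("1", {"mode": "routed"})])
#       => [{"link": "br0"}, {"mode": "routed"}]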


def FixHvParams(hvparams):
  # In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  # comma to space because commas cannot be accepted on the command line
  # (they already act as the separator between different hvparams). Still,
  # RAPI should be able to accept commas for backwards compatibility.
  # Therefore, we convert spaces into commas here, and we keep the old
  # parsing logic everywhere else.
  try:
    new_usb_devices = hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
    hvparams[constants.HV_USB_DEVICES] = new_usb_devices
  except KeyError:
    # No usb_devices, no modification required
    pass
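
# Editor's note (illustrative only, not part of the original module): the
# device spec values below are hypothetical placeholders; only the
# space-to-comma conversion matters here.
#   hv = {constants.HV_USB_DEVICES: "dev1 dev2"}
#   FixHvParams(hv)
#   # hv is now {constants.HV_USB_DEVICES: "dev1,dev2"}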


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if constants.IDISK_SPINDLES in ddict:
          raise errors.OpPrereqError("spindles is not a valid option when"
                                     " adopting a disk", errors.ECODE_INVAL)
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      if constants.IDISK_SPINDLES in ddict:
        ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])

      disks[didx] = ddict

  if opts.tags is not None:
2721
    tags = opts.tags.split(",")
2722
  else:
2723
    tags = []
2724

    
2725
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2726
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2727
  FixHvParams(hvparams)
2728

    
2729
  if mode == constants.INSTANCE_CREATE:
2730
    start = opts.start
2731
    os_type = opts.os
2732
    force_variant = opts.force_variant
2733
    src_node = None
2734
    src_path = None
2735
    no_install = opts.no_install
2736
    identify_defaults = False
2737
    compress = constants.IEC_NONE
2738
    instance_communication = opts.instance_communication
2739
  elif mode == constants.INSTANCE_IMPORT:
2740
    start = False
2741
    os_type = None
2742
    force_variant = False
2743
    src_node = opts.src_node
2744
    src_path = opts.src_dir
2745
    no_install = None
2746
    identify_defaults = opts.identify_defaults
2747
    compress = opts.compress
2748
    instance_communication = False
2749
  else:
2750
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2751

    
2752
  op = opcodes.OpInstanceCreate(instance_name=instance,
2753
                                disks=disks,
2754
                                disk_template=opts.disk_template,
2755
                                nics=nics,
2756
                                conflicts_check=opts.conflicts_check,
2757
                                pnode=pnode, snode=snode,
2758
                                ip_check=opts.ip_check,
2759
                                name_check=opts.name_check,
2760
                                wait_for_sync=opts.wait_for_sync,
2761
                                file_storage_dir=opts.file_storage_dir,
2762
                                file_driver=opts.file_driver,
2763
                                iallocator=opts.iallocator,
2764
                                hypervisor=hypervisor,
2765
                                hvparams=hvparams,
2766
                                beparams=opts.beparams,
2767
                                osparams=opts.osparams,
2768
                                mode=mode,
2769
                                start=start,
2770
                                os_type=os_type,
2771
                                force_variant=force_variant,
2772
                                src_node=src_node,
2773
                                src_path=src_path,
2774
                                compress=compress,
2775
                                tags=tags,
2776
                                no_install=no_install,
2777
                                identify_defaults=identify_defaults,
2778
                                ignore_ipolicy=opts.ignore_ipolicy,
2779
                                instance_communication=instance_communication)
2780

    
2781
  SubmitOrSend(op, opts)
2782
  return 0
2783

    
2784

    
2785
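# Illustrative note (not part of the original source): the per-disk
# dictionaries built above end up looking like, e.g.,
#   {constants.IDISK_SIZE: 10240}                 # hypothetical 10 GiB disk
#   {constants.IDISK_ADOPT: "myvolume", constants.IDISK_SIZE: 0}
# where "myvolume" is a hypothetical adoption source, sizes are in mebibytes,
# and the size of an adopted disk is forced to 0 as done above.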
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node,
               online_nodes, ssh_ports):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes
    @type ssh_ports: list
    @param ssh_ports: List of SSH ports of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd),
                            port=self.ssh_ports[node_name])

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()
  # Query client
  qcl = GetClient(query=True)

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=qcl)
  ssh_ports = GetNodesSshPorts(online_nodes, qcl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl
  del qcl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes, ssh_ports).Call(fn, *args)


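# Example use of RunWhileClusterStopped (illustrative sketch only; the
# callback is hypothetical): the called function receives the helper object,
# so it can run further commands while all daemons are down, e.g.
#   RunWhileClusterStopped(ToStdout,
#                          lambda helper: helper._RunCmd(None, ["hostname"]))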
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Generates a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


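# Example use of GenerateTable (illustrative; field names are hypothetical):
#   GenerateTable(headers={"name": "Node", "dfree": "DFree"},
#                 fields=["name", "dfree"], separator=None,
#                 data=[["node1", 2048], ["node2", 10240]],
#                 numfields=["dfree"], unitfields=["dfree"])
# returns the formatted lines with the "dfree" column right-aligned and
# rendered in human-readable units, since no separator was given.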
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


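# Example (illustrative): a format_override dictionary passed down to
# _GetColumnFormatter uses the same shape as _DEFAULT_FORMAT_QUERY, e.g.
#   {"ctime": (FormatTimestamp, False)}
# maps the hypothetical "ctime" field to a custom formatter with left
# alignment; such an entry is returned as-is instead of the default.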
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


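# Example use of GenericList (illustrative sketch; the "opts" object and its
# attributes are hypothetical):
#   return GenericList(constants.QR_NODE, ["name", "dfree"], args, opts.units,
#                      opts.separator, not opts.no_headers,
#                      verbose=opts.verbose, namefield="name")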
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


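# Example use of FormatTable (illustrative; column titles and data are
# made up):
#   cols = [TableColumn("Name", str, False),
#           TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True)]
#   FormatTable([["disk0", 1024], ["disk1", 20480]], cols, True, None)
# yields header and data rows with the "Size" column right-aligned and padded
# to its widest formatted value.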
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient(query=True)

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{Client}
  @return: the list of SSH ports corresponding to the nodes
  @rtype: a list of integers
  """
  return map(lambda t: t[0],
             cl.QueryNodes(names=nodes,
                           fields=["ndp/ssh_port"],
                           use_locking=False))


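# Example (illustrative; node names are hypothetical):
#   GetNodesSshPorts(["node1.example.com", "node2.example.com"], cl)
# returns the configured SSH port of each node, e.g. [22, 2222], in the same
# order as the names passed in.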
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


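# Example (illustrative): both helpers accept printf-style arguments that are
# interpolated into the message, e.g.
#   ToStdout("Submitted job %s", job_id)
#   ToStderr("Failure: %s", errmsg)
# where job_id and errmsg are hypothetical variables.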
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, rpcerr.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


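# Example use of JobExecutor (illustrative sketch; the opcode and variable
# names are hypothetical):
#   jex = JobExecutor(cl=cl, opts=opts)
#   for name in instance_names:
#     jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
#   jex.WaitOrShow(True)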
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict