#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "COMPRESS_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
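
# Illustrative usage of _SplitKeyVal (a sketch derived from the docstring
# above; the option name and keys are made up for the example):
#   _SplitKeyVal("--backend", "mem=512,no_auto_balance,-kernel_path", True)
#   -> {"mem": "512", "auto_balance": False, "kernel_path": None}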


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
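
# Illustrative usage of _SplitIdentKeyVal (a sketch based on the docstring
# above; option names and values are made up for the example):
#   _SplitIdentKeyVal("--net", "0:ip=192.0.2.10,mode=bridged", True)
#   -> ("0", {"ip": "192.0.2.10", "mode": "bridged"})
#   _SplitIdentKeyVal("--net", "no_link0", True) -> ("link0", False)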


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
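
# Illustrative usage of check_multilist_ident_key_val (a sketch based on the
# docstring above; identifiers and keys are made up for the example):
#   check_multilist_ident_key_val(None, "--opt",
#                                 "sda:size=10/sdb:size=20//sdc:size=5")
#   -> [{"sda": {"size": "10"}, "sdb": {"size": "20"}},
#       {"sdc": {"size": "5"}}]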


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
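
# Illustrative sketch of how the custom types registered above are used (the
# option name "--example-params" is hypothetical, not defined in this module):
#   EXAMPLE_OPT = cli_option("--example-params", dest="example_params",
#                            type="keyval", default={},
#                            help="Example key=value parameters")
# optparse would then run check_key_val on the value, so "a=1,no_b" would be
# stored as {"a": "1", "b": False}.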


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

COMPRESS_OPT = cli_option("--compress", dest="compress",
                          default=constants.IEC_NONE,
                          help="The compression mode to use",
                          choices=list(constants.IEC_ALL))

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
1412
                                             " secret from file"))
1413

    
1414
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1415
                                           dest="new_cluster_domain_secret",
1416
                                           default=False, action="store_true",
1417
                                           help=("Create a new cluster domain"
1418
                                                 " secret"))
1419

    
1420
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1421
                              dest="use_replication_network",
1422
                              help="Whether to use the replication network"
1423
                              " for talking to the nodes",
1424
                              action="store_true", default=False)
1425

    
1426
MAINTAIN_NODE_HEALTH_OPT = \
1427
    cli_option("--maintain-node-health", dest="maintain_node_health",
1428
               metavar=_YORNO, default=None, type="bool",
1429
               help="Configure the cluster to automatically maintain node"
1430
               " health, by shutting down unknown instances, shutting down"
1431
               " unknown DRBD devices, etc.")
1432

    
1433
IDENTIFY_DEFAULTS_OPT = \
1434
    cli_option("--identify-defaults", dest="identify_defaults",
1435
               default=False, action="store_true",
1436
               help="Identify which saved instance parameters are equal to"
1437
               " the current cluster defaults and set them as such, instead"
1438
               " of marking them as overridden")
1439

    
1440
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1441
                         action="store", dest="uid_pool",
1442
                         help=("A list of user-ids or user-id"
1443
                               " ranges separated by commas"))
1444

    
1445
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1446
                          action="store", dest="add_uids",
1447
                          help=("A list of user-ids or user-id"
1448
                                " ranges separated by commas, to be"
1449
                                " added to the user-id pool"))
1450

    
1451
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1452
                             action="store", dest="remove_uids",
1453
                             help=("A list of user-ids or user-id"
1454
                                   " ranges separated by commas, to be"
1455
                                   " removed from the user-id pool"))
1456

    
1457
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1458
                              action="store", dest="reserved_lvs",
1459
                              help=("A comma-separated list of reserved"
1460
                                    " logical volumes names, that will be"
1461
                                    " ignored by cluster verify"))
1462

    
1463
ROMAN_OPT = cli_option("--roman",
1464
                       dest="roman_integers", default=False,
1465
                       action="store_true",
1466
                       help="Use Roman numerals for positive integers")
1467

    
1468
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1469
                             action="store", default=None,
1470
                             help="Specifies usermode helper for DRBD")
1471

    
1472
PRIMARY_IP_VERSION_OPT = \
1473
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1474
               action="store", dest="primary_ip_version",
1475
               metavar="%d|%d" % (constants.IP4_VERSION,
1476
                                  constants.IP6_VERSION),
1477
               help="Cluster-wide IP version for primary IP")
1478

    
1479
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1480
                              action="store_true",
1481
                              help="Show machine name for every line in output")
1482

    
1483
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1484
                              action="store_true",
1485
                              help=("Hide successful results and show failures"
1486
                                    " only (determined by the exit code)"))
1487

    
1488
REASON_OPT = cli_option("--reason", default=None,
1489
                        help="The reason for executing the command")
1490

    
1491

    
1492
def _PriorityOptionCb(option, _, value, parser):
1493
  """Callback for processing C{--priority} option.
1494

1495
  """
1496
  value = _PRIONAME_TO_VALUE[value]
1497

    
1498
  setattr(parser.values, option.dest, value)
1499

    
1500

    
1501
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1502
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1503
                          choices=_PRIONAME_TO_VALUE.keys(),
1504
                          action="callback", type="choice",
1505
                          callback=_PriorityOptionCb,
1506
                          help="Priority for opcode processing")
1507

    
1508
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1509
                        type="bool", default=None, metavar=_YORNO,
1510
                        help="Sets the hidden flag on the OS")
1511

    
1512
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1513
                        type="bool", default=None, metavar=_YORNO,
1514
                        help="Sets the blacklisted flag on the OS")
1515

    
1516
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1517
                                     type="bool", metavar=_YORNO,
1518
                                     dest="prealloc_wipe_disks",
1519
                                     help=("Wipe disks prior to instance"
1520
                                           " creation"))
1521

    
1522
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1523
                             type="keyval", default=None,
1524
                             help="Node parameters")
1525

    
1526
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1527
                              action="store", metavar="POLICY", default=None,
1528
                              help="Allocation policy for the node group")
1529

    
1530
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1531
                              type="bool", metavar=_YORNO,
1532
                              dest="node_powered",
1533
                              help="Specify if the SoR for node is powered")
1534

    
1535
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1536
                             default=constants.OOB_TIMEOUT,
1537
                             help="Maximum time to wait for out-of-band helper")
1538

    
1539
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1540
                             default=constants.OOB_POWER_DELAY,
1541
                             help="Time in seconds to wait between power-ons")
1542

    
1543
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1544
                              action="store_true", default=False,
1545
                              help=("Whether command argument should be treated"
1546
                                    " as filter"))
1547

    
1548
NO_REMEMBER_OPT = cli_option("--no-remember",
1549
                             dest="no_remember",
1550
                             action="store_true", default=False,
1551
                             help="Perform but do not record the change"
1552
                             " in the configuration")
1553

    
1554
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1555
                              default=False, action="store_true",
1556
                              help="Evacuate primary instances only")
1557

    
1558
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1559
                                default=False, action="store_true",
1560
                                help="Evacuate secondary instances only"
1561
                                     " (applies only to internally mirrored"
1562
                                     " disk templates, e.g. %s)" %
1563
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1564

    
1565
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1566
                                action="store_true", default=False,
1567
                                help="Pause instance at startup")
1568

    
1569
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1570
                          help="Destination node group (name or uuid)",
1571
                          default=None, action="append",
1572
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1573

    
1574
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1575
                               action="append", dest="ignore_errors",
1576
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1577
                               help="Error code to be ignored")
1578

    
1579
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1580
                            action="append",
1581
                            help=("Specify disk state information in the"
1582
                                  " format"
1583
                                  " storage_type/identifier:option=value,...;"
1584
                                  " note this is unused for now"),
1585
                            type="identkeyval")
1586

    
1587
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1588
                          action="append",
1589
                          help=("Specify hypervisor state information in the"
1590
                                " format hypervisor:option=value,...;"
1591
                                " note this is unused for now"),
1592
                          type="identkeyval")
1593

    
1594
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1595
                                action="store_true", default=False,
1596
                                help="Ignore instance policy violations")
1597

    
1598
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1599
                             help="Sets the instance's runtime memory,"
1600
                             " ballooning it up or down to the new value",
1601
                             default=None, type="unit", metavar="<size>")
1602

    
1603
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1604
                          action="store_true", default=False,
1605
                          help="Marks the grow as absolute instead of the"
1606
                          " (default) relative mode")
1607

    
1608
NETWORK_OPT = cli_option("--network",
1609
                         action="store", default=None, dest="network",
1610
                         help="IP network in CIDR notation")
1611

    
1612
GATEWAY_OPT = cli_option("--gateway",
1613
                         action="store", default=None, dest="gateway",
1614
                         help="IP address of the router (gateway)")
1615

    
1616
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1617
                                  action="store", default=None,
1618
                                  dest="add_reserved_ips",
1619
                                  help="Comma-separated list of"
1620
                                  " reserved IPs to add")
1621

    
1622
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1623
                                     action="store", default=None,
1624
                                     dest="remove_reserved_ips",
1625
                                     help="Comma-separated list of"
1626
                                     " reserved IPs to remove")
1627

    
1628
NETWORK6_OPT = cli_option("--network6",
1629
                          action="store", default=None, dest="network6",
1630
                          help="IP network in CIDR notation")
1631

    
1632
GATEWAY6_OPT = cli_option("--gateway6",
1633
                          action="store", default=None, dest="gateway6",
1634
                          help="IP6 address of the router (gateway)")
1635

    
1636
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1637
                                  dest="conflicts_check",
1638
                                  default=True,
1639
                                  action="store_false",
1640
                                  help="Don't check for conflicting IPs")
1641

    
1642
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1643
                                 default=False, action="store_true",
1644
                                 help="Include default values")
1645

    
1646
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1647
                         action="store_true", default=False,
1648
                         help="Hotplug supported devices (NICs and Disks)")
1649

    
1650
#: Options provided by all commands
1651
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1652

    
1653
# options related to asynchronous job handling
1654

    
1655
SUBMIT_OPTS = [
1656
  SUBMIT_OPT,
1657
  PRINT_JOBID_OPT,
1658
  ]
1659

    
1660
# common options for creating instances. add and import then add their own
1661
# specific ones.
1662
COMMON_CREATE_OPTS = [
1663
  BACKEND_OPT,
1664
  DISK_OPT,
1665
  DISK_TEMPLATE_OPT,
1666
  FILESTORE_DIR_OPT,
1667
  FILESTORE_DRIVER_OPT,
1668
  HYPERVISOR_OPT,
1669
  IALLOCATOR_OPT,
1670
  NET_OPT,
1671
  NODE_PLACEMENT_OPT,
1672
  NOIPCHECK_OPT,
1673
  NOCONFLICTSCHECK_OPT,
1674
  NONAMECHECK_OPT,
1675
  NONICS_OPT,
1676
  NWSYNC_OPT,
1677
  OSPARAMS_OPT,
1678
  OS_SIZE_OPT,
1679
  SUBMIT_OPT,
1680
  PRINT_JOBID_OPT,
1681
  TAG_ADD_OPT,
1682
  DRY_RUN_OPT,
1683
  PRIORITY_OPT,
1684
  ]
1685

    
1686
# common instance policy options
1687
INSTANCE_POLICY_OPTS = [
1688
  IPOLICY_BOUNDS_SPECS_OPT,
1689
  IPOLICY_DISK_TEMPLATES,
1690
  IPOLICY_VCPU_RATIO,
1691
  IPOLICY_SPINDLE_RATIO,
1692
  ]
1693

    
1694
# instance policy split specs options
1695
SPLIT_ISPECS_OPTS = [
1696
  SPECS_CPU_COUNT_OPT,
1697
  SPECS_DISK_COUNT_OPT,
1698
  SPECS_DISK_SIZE_OPT,
1699
  SPECS_MEM_SIZE_OPT,
1700
  SPECS_NIC_COUNT_OPT,
1701
  ]
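
# The lists above are building blocks rather than complete option sets: a
# gnt-* client typically concatenates them with command-specific options
# when building its command table. A rough sketch (the extra option chosen
# here is only an example):
#
#   import_opts = COMMON_CREATE_OPTS + [COMPRESS_OPT]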
1702

    
1703

    
1704
class _ShowUsage(Exception):
1705
  """Exception class for L{_ParseArgs}.
1706

1707
  """
1708
  def __init__(self, exit_error):
1709
    """Initializes instances of this class.
1710

1711
    @type exit_error: bool
1712
    @param exit_error: Whether to report failure on exit
1713

1714
    """
1715
    Exception.__init__(self)
1716
    self.exit_error = exit_error
1717

    
1718

    
1719
class _ShowVersion(Exception):
1720
  """Exception class for L{_ParseArgs}.
1721

1722
  """
1723

    
1724

    
1725
def _ParseArgs(binary, argv, commands, aliases, env_override):
1726
  """Parser for the command line arguments.
1727

1728
  This function parses the arguments and returns the function which
1729
  must be executed together with its (modified) arguments.
1730

1731
  @param binary: Script name
1732
  @param argv: Command line arguments
1733
  @param commands: Dictionary containing command definitions
1734
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1735
  @param env_override: list of env variables allowed for default args
1736
  @raise _ShowUsage: If usage description should be shown
1737
  @raise _ShowVersion: If version should be shown
1738

1739
  """
1740
  assert not (env_override - set(commands))
1741
  assert not (set(aliases.keys()) & set(commands.keys()))
1742

    
1743
  if len(argv) > 1:
1744
    cmd = argv[1]
1745
  else:
1746
    # No option or command given
1747
    raise _ShowUsage(exit_error=True)
1748

    
1749
  if cmd == "--version":
1750
    raise _ShowVersion()
1751
  elif cmd == "--help":
1752
    raise _ShowUsage(exit_error=False)
1753
  elif not (cmd in commands or cmd in aliases):
1754
    raise _ShowUsage(exit_error=True)
1755

    
1756
  # get command, unalias it, and look it up in commands
1757
  if cmd in aliases:
1758
    if aliases[cmd] not in commands:
1759
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1760
                                   " command '%s'" % (cmd, aliases[cmd]))
1761

    
1762
    cmd = aliases[cmd]
1763

    
1764
  if cmd in env_override:
1765
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1766
    env_args = os.environ.get(args_env_name)
1767
    if env_args:
1768
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1769

    
1770
  func, args_def, parser_opts, usage, description = commands[cmd]
1771
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1772
                        description=description,
1773
                        formatter=TitledHelpFormatter(),
1774
                        usage="%%prog %s %s" % (cmd, usage))
1775
  parser.disable_interspersed_args()
1776
  options, args = parser.parse_args(args=argv[2:])
1777

    
1778
  if not _CheckArguments(cmd, args_def, args):
1779
    return None, None, None
1780

    
1781
  return func, options, args
1782

    
1783

    
1784
def _FormatUsage(binary, commands):
1785
  """Generates a nice description of all commands.
1786

1787
  @param binary: Script name
1788
  @param commands: Dictionary containing command definitions
1789

1790
  """
1791
  # compute the max length of the command names
1792
  mlen = min(60, max(map(len, commands)))
1793

    
1794
  yield "Usage: %s {command} [options...] [argument...]" % binary
1795
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1796
  yield ""
1797
  yield "Commands:"
1798

    
1799
  # and format a nice command list
1800
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1801
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1802
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1803
    for line in help_lines:
1804
      yield " %-*s   %s" % (mlen, "", line)
1805

    
1806
  yield ""
1807

    
1808

    
1809
def _CheckArguments(cmd, args_def, args):
1810
  """Verifies the arguments using the argument definition.
1811

1812
  Algorithm:
1813

1814
    1. Abort with error if values specified by user but none expected.
1815

1816
    1. For each argument in definition
1817

1818
      1. Keep running count of minimum number of values (min_count)
1819
      1. Keep running count of maximum number of values (max_count)
1820
      1. If it has an unlimited number of values
1821

1822
        1. Abort with error if it's not the last argument in the definition
1823

1824
    1. If last argument has limited number of values
1825

1826
      1. Abort with error if number of values doesn't match or is too large
1827

1828
    1. Abort with error if user didn't pass enough values (min_count)
1829

1830
  """
1831
  if args and not args_def:
1832
    ToStderr("Error: Command %s expects no arguments", cmd)
1833
    return False
1834

    
1835
  min_count = None
1836
  max_count = None
1837
  check_max = None
1838

    
1839
  last_idx = len(args_def) - 1
1840

    
1841
  for idx, arg in enumerate(args_def):
1842
    if min_count is None:
1843
      min_count = arg.min
1844
    elif arg.min is not None:
1845
      min_count += arg.min
1846

    
1847
    if max_count is None:
1848
      max_count = arg.max
1849
    elif arg.max is not None:
1850
      max_count += arg.max
1851

    
1852
    if idx == last_idx:
1853
      check_max = (arg.max is not None)
1854

    
1855
    elif arg.max is None:
1856
      raise errors.ProgrammerError("Only the last argument can have max=None")
1857

    
1858
  if check_max:
1859
    # Command with exact number of arguments
1860
    if (min_count is not None and max_count is not None and
1861
        min_count == max_count and len(args) != min_count):
1862
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1863
      return False
1864

    
1865
    # Command with limited number of arguments
1866
    if max_count is not None and len(args) > max_count:
1867
      ToStderr("Error: Command %s expects only %d argument(s)",
1868
               cmd, max_count)
1869
      return False
1870

    
1871
  # Command with some required arguments
1872
  if min_count is not None and len(args) < min_count:
1873
    ToStderr("Error: Command %s expects at least %d argument(s)",
1874
             cmd, min_count)
1875
    return False
1876

    
1877
  return True
1878

    
1879

    
1880
def SplitNodeOption(value):
1881
  """Splits the value of a --node option.
1882

1883
  """
1884
  if value and ":" in value:
1885
    return value.split(":", 1)
1886
  else:
1887
    return (value, None)
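
# Illustration of SplitNodeOption (node names are placeholders); note the
# list/tuple asymmetry, which comes from str.split vs the explicit tuple:
#
#   >>> SplitNodeOption("node1.example.com:node2.example.com")
#   ['node1.example.com', 'node2.example.com']
#   >>> SplitNodeOption("node1.example.com")
#   ('node1.example.com', None)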
1888

    
1889

    
1890
def CalculateOSNames(os_name, os_variants):
1891
  """Calculates all the names an OS can be called, according to its variants.
1892

1893
  @type os_name: string
1894
  @param os_name: base name of the os
1895
  @type os_variants: list or None
1896
  @param os_variants: list of supported variants
1897
  @rtype: list
1898
  @return: list of valid names
1899

1900
  """
1901
  if os_variants:
1902
    return ["%s+%s" % (os_name, v) for v in os_variants]
1903
  else:
1904
    return [os_name]
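
# Illustration of CalculateOSNames (the OS and variant names are placeholders):
#
#   >>> CalculateOSNames("debootstrap", ["default", "minimal"])
#   ['debootstrap+default', 'debootstrap+minimal']
#   >>> CalculateOSNames("debootstrap", None)
#   ['debootstrap']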
1905

    
1906

    
1907
def ParseFields(selected, default):
1908
  """Parses the values of "--field"-like options.
1909

1910
  @type selected: string or None
1911
  @param selected: User-selected options
1912
  @type default: list
1913
  @param default: Default fields
1914

1915
  """
1916
  if selected is None:
1917
    return default
1918

    
1919
  if selected.startswith("+"):
1920
    return default + selected[1:].split(",")
1921

    
1922
  return selected.split(",")
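
# Illustration of ParseFields; the field names are placeholders:
#
#   >>> ParseFields(None, ["name", "status"])        # no user selection
#   ['name', 'status']
#   >>> ParseFields("+os", ["name", "status"])       # extend the defaults
#   ['name', 'status', 'os']
#   >>> ParseFields("name,os", ["name", "status"])   # replace the defaults
#   ['name', 'os']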
1923

    
1924

    
1925
UsesRPC = rpc.RunWithRPC
1926

    
1927

    
1928
def AskUser(text, choices=None):
1929
  """Ask the user a question.
1930

1931
  @param text: the question to ask
1932

1933
  @param choices: list of tuples (input_char, return_value,
1934
      description); if not given, it will default to: [('y', True,
1935
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1936
      note that the '?' char is reserved for help
1937

1938
  @return: one of the return values from the choices list; if input is
1939
      not possible (i.e. not running with a tty), we return the last
1940
      entry from the list
1941

1942
  """
1943
  if choices is None:
1944
    choices = [("y", True, "Perform the operation"),
1945
               ("n", False, "Do not perform the operation")]
1946
  if not choices or not isinstance(choices, list):
1947
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1948
  for entry in choices:
1949
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1950
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1951

    
1952
  answer = choices[-1][1]
1953
  new_text = []
1954
  for line in text.splitlines():
1955
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1956
  text = "\n".join(new_text)
1957
  try:
1958
    f = file("/dev/tty", "a+")
1959
  except IOError:
1960
    return answer
1961
  try:
1962
    chars = [entry[0] for entry in choices]
1963
    chars[-1] = "[%s]" % chars[-1]
1964
    chars.append("?")
1965
    maps = dict([(entry[0], entry[1]) for entry in choices])
1966
    while True:
1967
      f.write(text)
1968
      f.write("\n")
1969
      f.write("/".join(chars))
1970
      f.write(": ")
1971
      line = f.readline(2).strip().lower()  # at most one char + newline
1972
      if line in maps:
1973
        answer = maps[line]
1974
        break
1975
      elif line == "?":
1976
        for entry in choices:
1977
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1978
        f.write("\n")
1979
        continue
1980
  finally:
1981
    f.close()
1982
  return answer
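
# Typical use of AskUser, sketched with a made-up prompt and an extra
# choice; the default (y/n) form simply returns a boolean:
#
#   if not AskUser("Continue with this operation?"):
#     return 1
#
#   choices = [("y", True, "Perform the operation"),
#              ("n", False, "Do not perform the operation"),
#              ("d", "detail", "Show more details first")]
#   answer = AskUser("Really continue?", choices)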
1983

    
1984

    
1985
class JobSubmittedException(Exception):
1986
  """Job was submitted, client should exit.
1987

1988
  This exception has one argument, the ID of the job that was
1989
  submitted. The handler should print this ID.
1990

1991
  This is not an error, just a structured way to exit from clients.
1992

1993
  """
1994

    
1995

    
1996
def SendJob(ops, cl=None):
1997
  """Function to submit an opcode without waiting for the results.
1998

1999
  @type ops: list
2000
  @param ops: list of opcodes
2001
  @type cl: luxi.Client
2002
  @param cl: the luxi client to use for communicating with the master;
2003
             if None, a new client will be created
2004

2005
  """
2006
  if cl is None:
2007
    cl = GetClient()
2008

    
2009
  job_id = cl.SubmitJob(ops)
2010

    
2011
  return job_id
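
# Fire-and-forget submission, sketched; "op" stands for any already
# constructed opcode:
#
#   job_id = SendJob([op])
#   ToStdout("%d", job_id)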
2012

    
2013

    
2014
def GenericPollJob(job_id, cbs, report_cbs):
2015
  """Generic job-polling function.
2016

2017
  @type job_id: number
2018
  @param job_id: Job ID
2019
  @type cbs: Instance of L{JobPollCbBase}
2020
  @param cbs: Data callbacks
2021
  @type report_cbs: Instance of L{JobPollReportCbBase}
2022
  @param report_cbs: Reporting callbacks
2023

2024
  """
2025
  prev_job_info = None
2026
  prev_logmsg_serial = None
2027

    
2028
  status = None
2029

    
2030
  while True:
2031
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2032
                                      prev_logmsg_serial)
2033
    if not result:
2034
      # job not found, go away!
2035
      raise errors.JobLost("Job with id %s lost" % job_id)
2036

    
2037
    if result == constants.JOB_NOTCHANGED:
2038
      report_cbs.ReportNotChanged(job_id, status)
2039

    
2040
      # Wait again
2041
      continue
2042

    
2043
    # Split result, a tuple of (field values, log entries)
2044
    (job_info, log_entries) = result
2045
    (status, ) = job_info
2046

    
2047
    if log_entries:
2048
      for log_entry in log_entries:
2049
        (serial, timestamp, log_type, message) = log_entry
2050
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2051
                                    log_type, message)
2052
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2053

    
2054
    # TODO: Handle canceled and archived jobs
2055
    elif status in (constants.JOB_STATUS_SUCCESS,
2056
                    constants.JOB_STATUS_ERROR,
2057
                    constants.JOB_STATUS_CANCELING,
2058
                    constants.JOB_STATUS_CANCELED):
2059
      break
2060

    
2061
    prev_job_info = job_info
2062

    
2063
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2064
  if not jobs:
2065
    raise errors.JobLost("Job with id %s lost" % job_id)
2066

    
2067
  status, opstatus, result = jobs[0]
2068

    
2069
  if status == constants.JOB_STATUS_SUCCESS:
2070
    return result
2071

    
2072
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2073
    raise errors.OpExecError("Job was canceled")
2074

    
2075
  has_ok = False
2076
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2077
    if status == constants.OP_STATUS_SUCCESS:
2078
      has_ok = True
2079
    elif status == constants.OP_STATUS_ERROR:
2080
      errors.MaybeRaise(msg)
2081

    
2082
      if has_ok:
2083
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2084
                                 (idx, msg))
2085

    
2086
      raise errors.OpExecError(str(msg))
2087

    
2088
  # default failure mode
2089
  raise errors.OpExecError(result)
2090

    
2091

    
2092
class JobPollCbBase:
2093
  """Base class for L{GenericPollJob} callbacks.
2094

2095
  """
2096
  def __init__(self):
2097
    """Initializes this class.
2098

2099
    """
2100

    
2101
  def WaitForJobChangeOnce(self, job_id, fields,
2102
                           prev_job_info, prev_log_serial):
2103
    """Waits for changes on a job.
2104

2105
    """
2106
    raise NotImplementedError()
2107

    
2108
  def QueryJobs(self, job_ids, fields):
2109
    """Returns the selected fields for the selected job IDs.
2110

2111
    @type job_ids: list of numbers
2112
    @param job_ids: Job IDs
2113
    @type fields: list of strings
2114
    @param fields: Fields
2115

2116
    """
2117
    raise NotImplementedError()
2118

    
2119

    
2120
class JobPollReportCbBase:
2121
  """Base class for L{GenericPollJob} reporting callbacks.
2122

2123
  """
2124
  def __init__(self):
2125
    """Initializes this class.
2126

2127
    """
2128

    
2129
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2130
    """Handles a log message.
2131

2132
    """
2133
    raise NotImplementedError()
2134

    
2135
  def ReportNotChanged(self, job_id, status):
2136
    """Called for if a job hasn't changed in a while.
2137

2138
    @type job_id: number
2139
    @param job_id: Job ID
2140
    @type status: string or None
2141
    @param status: Job status if available
2142

2143
    """
2144
    raise NotImplementedError()
2145

    
2146

    
2147
class _LuxiJobPollCb(JobPollCbBase):
2148
  def __init__(self, cl):
2149
    """Initializes this class.
2150

2151
    """
2152
    JobPollCbBase.__init__(self)
2153
    self.cl = cl
2154

    
2155
  def WaitForJobChangeOnce(self, job_id, fields,
2156
                           prev_job_info, prev_log_serial):
2157
    """Waits for changes on a job.
2158

2159
    """
2160
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2161
                                        prev_job_info, prev_log_serial)
2162

    
2163
  def QueryJobs(self, job_ids, fields):
2164
    """Returns the selected fields for the selected job IDs.
2165

2166
    """
2167
    return self.cl.QueryJobs(job_ids, fields)
2168

    
2169

    
2170
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2171
  def __init__(self, feedback_fn):
2172
    """Initializes this class.
2173

2174
    """
2175
    JobPollReportCbBase.__init__(self)
2176

    
2177
    self.feedback_fn = feedback_fn
2178

    
2179
    assert callable(feedback_fn)
2180

    
2181
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2182
    """Handles a log message.
2183

2184
    """
2185
    self.feedback_fn((timestamp, log_type, log_msg))
2186

    
2187
  def ReportNotChanged(self, job_id, status):
2188
    """Called if a job hasn't changed in a while.
2189

2190
    """
2191
    # Ignore
2192

    
2193

    
2194
class StdioJobPollReportCb(JobPollReportCbBase):
2195
  def __init__(self):
2196
    """Initializes this class.
2197

2198
    """
2199
    JobPollReportCbBase.__init__(self)
2200

    
2201
    self.notified_queued = False
2202
    self.notified_waitlock = False
2203

    
2204
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2205
    """Handles a log message.
2206

2207
    """
2208
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2209
             FormatLogMessage(log_type, log_msg))
2210

    
2211
  def ReportNotChanged(self, job_id, status):
2212
    """Called if a job hasn't changed in a while.
2213

2214
    """
2215
    if status is None:
2216
      return
2217

    
2218
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2219
      ToStderr("Job %s is waiting in queue", job_id)
2220
      self.notified_queued = True
2221

    
2222
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2223
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2224
      self.notified_waitlock = True
2225

    
2226

    
2227
def FormatLogMessage(log_type, log_msg):
2228
  """Formats a job message according to its type.
2229

2230
  """
2231
  if log_type != constants.ELOG_MESSAGE:
2232
    log_msg = str(log_msg)
2233

    
2234
  return utils.SafeEncode(log_msg)
2235

    
2236

    
2237
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2238
  """Function to poll for the result of a job.
2239

2240
  @type job_id: job identifier
2241
  @param job_id: the job to poll for results
2242
  @type cl: luxi.Client
2243
  @param cl: the luxi client to use for communicating with the master;
2244
             if None, a new client will be created
2245

2246
  """
2247
  if cl is None:
2248
    cl = GetClient()
2249

    
2250
  if reporter is None:
2251
    if feedback_fn:
2252
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2253
    else:
2254
      reporter = StdioJobPollReportCb()
2255
  elif feedback_fn:
2256
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2257

    
2258
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
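
# Submitting and then waiting for a job, sketched ("op" is any already
# constructed opcode); with neither reporter nor feedback_fn given,
# progress is printed to stdout via StdioJobPollReportCb:
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)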
2259

    
2260

    
2261
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2262
  """Legacy function to submit an opcode.
2263

2264
  This is just a simple wrapper over the construction of the processor
2265
  instance. It should be extended to better handle feedback and
2266
  interaction functions.
2267

2268
  """
2269
  if cl is None:
2270
    cl = GetClient()
2271

    
2272
  SetGenericOpcodeOpts([op], opts)
2273

    
2274
  job_id = SendJob([op], cl=cl)
2275
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2276
    ToStdout("%d" % job_id)
2277

    
2278
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2279
                       reporter=reporter)
2280

    
2281
  return op_results[0]
2282

    
2283

    
2284
def SubmitOpCodeToDrainedQueue(op):
2285
  """Forcefully insert a job in the queue, even if it is drained.
2286

2287
  """
2288
  cl = GetClient()
2289
  job_id = cl.SubmitJobToDrainedQueue([op])
2290
  op_results = PollJob(job_id, cl=cl)
2291
  return op_results[0]
2292

    
2293

    
2294
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2295
  """Wrapper around SubmitOpCode or SendJob.
2296

2297
  This function will decide, based on the 'opts' parameter, whether to
2298
  submit and wait for the result of the opcode (and return it), or
2299
  whether to just send the job and print its identifier. It is used in
2300
  order to simplify the implementation of the '--submit' option.
2301

2302
  It will also process the opcodes if we're sending them via SendJob
2303
  (otherwise SubmitOpCode does it).
2304

2305
  """
2306
  if opts and opts.submit_only:
2307
    job = [op]
2308
    SetGenericOpcodeOpts(job, opts)
2309
    job_id = SendJob(job, cl=cl)
2310
    if opts.print_jobid:
2311
      ToStdout("%d" % job_id)
2312
    raise JobSubmittedException(job_id)
2313
  else:
2314
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
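
# Sketch of a client command function built on SubmitOrSend; the opcode
# and the function name are illustrative, not taken from a real client:
#
#   def ActivateDisks(opts, args):
#     op = opcodes.OpInstanceActivateDisks(instance_name=args[0])
#     SubmitOrSend(op, opts)
#     return 0
#
# With --submit this prints the job ID and raises JobSubmittedException;
# otherwise SubmitOrSend waits for the job and returns the opcode result.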
2315

    
2316

    
2317
def _InitReasonTrail(op, opts):
2318
  """Builds the first part of the reason trail
2319

2320
  Builds the initial part of the reason trail, adding the user provided reason
2321
  (if it exists) and the name of the command starting the operation.
2322

2323
  @param op: the opcode the reason trail will be added to
2324
  @param opts: the command line options selected by the user
2325

2326
  """
2327
  assert len(sys.argv) >= 2
2328
  trail = []
2329

    
2330
  if opts.reason:
2331
    trail.append((constants.OPCODE_REASON_SRC_USER,
2332
                  opts.reason,
2333
                  utils.EpochNano()))
2334

    
2335
  binary = os.path.basename(sys.argv[0])
2336
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2337
  command = sys.argv[1]
2338
  trail.append((source, command, utils.EpochNano()))
2339
  op.reason = trail
2340

    
2341

    
2342
def SetGenericOpcodeOpts(opcode_list, options):
2343
  """Processor for generic options.
2344

2345
  This function updates the given opcodes based on generic command
2346
  line options (like debug, dry-run, etc.).
2347

2348
  @param opcode_list: list of opcodes
2349
  @param options: command line options or None
2350
  @return: None (in-place modification)
2351

2352
  """
2353
  if not options:
2354
    return
2355
  for op in opcode_list:
2356
    op.debug_level = options.debug
2357
    if hasattr(options, "dry_run"):
2358
      op.dry_run = options.dry_run
2359
    if getattr(options, "priority", None) is not None:
2360
      op.priority = options.priority
2361
    _InitReasonTrail(op, options)
2362

    
2363

    
2364
def GetClient(query=False):
2365
  """Connects to the a luxi socket and returns a client.
2366

2367
  @type query: boolean
2368
  @param query: this signifies that the client will only be
2369
      used for queries; if the build-time parameter
2370
      enable-split-queries is enabled, then the client will be
2371
      connected to the query socket instead of the masterd socket
2372

2373
  """
2374
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2375
  if override_socket:
2376
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2377
      address = pathutils.MASTER_SOCKET
2378
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2379
      address = pathutils.QUERY_SOCKET
2380
    else:
2381
      address = override_socket
2382
  elif query:
2383
    address = pathutils.QUERY_SOCKET
2384
  else:
2385
    address = None
2386
  # TODO: Cache object?
2387
  try:
2388
    client = luxi.Client(address=address)
2389
  except luxi.NoMasterError:
2390
    ss = ssconf.SimpleStore()
2391

    
2392
    # Try to read ssconf file
2393
    try:
2394
      ss.GetMasterNode()
2395
    except errors.ConfigurationError:
2396
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2397
                                 " not part of a cluster",
2398
                                 errors.ECODE_INVAL)
2399

    
2400
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2401
    if master != myself:
2402
      raise errors.OpPrereqError("This is not the master node, please connect"
2403
                                 " to node '%s' and rerun the command" %
2404
                                 master, errors.ECODE_INVAL)
2405
    raise
2406
  return client
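
# Typical use: one client for submitting jobs (masterd) and, when only
# queries are needed, one that may use the query socket instead:
#
#   cl = GetClient()
#   qcl = GetClient(query=True)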
2407

    
2408

    
2409
def FormatError(err):
2410
  """Return a formatted error message for a given error.
2411

2412
  This function takes an exception instance and returns a tuple
2413
  consisting of two values: first, the recommended exit code, and
2414
  second, a string describing the error message (not
2415
  newline-terminated).
2416

2417
  """
2418
  retcode = 1
2419
  obuf = StringIO()
2420
  msg = str(err)
2421
  if isinstance(err, errors.ConfigurationError):
2422
    txt = "Corrupt configuration file: %s" % msg
2423
    logging.error(txt)
2424
    obuf.write(txt + "\n")
2425
    obuf.write("Aborting.")
2426
    retcode = 2
2427
  elif isinstance(err, errors.HooksAbort):
2428
    obuf.write("Failure: hooks execution failed:\n")
2429
    for node, script, out in err.args[0]:
2430
      if out:
2431
        obuf.write("  node: %s, script: %s, output: %s\n" %
2432
                   (node, script, out))
2433
      else:
2434
        obuf.write("  node: %s, script: %s (no output)\n" %
2435
                   (node, script))
2436
  elif isinstance(err, errors.HooksFailure):
2437
    obuf.write("Failure: hooks general failure: %s" % msg)
2438
  elif isinstance(err, errors.ResolverError):
2439
    this_host = netutils.Hostname.GetSysName()
2440
    if err.args[0] == this_host:
2441
      msg = "Failure: can't resolve my own hostname ('%s')"
2442
    else:
2443
      msg = "Failure: can't resolve hostname '%s'"
2444
    obuf.write(msg % err.args[0])
2445
  elif isinstance(err, errors.OpPrereqError):
2446
    if len(err.args) == 2:
2447
      obuf.write("Failure: prerequisites not met for this"
2448
                 " operation:\nerror type: %s, error details:\n%s" %
2449
                 (err.args[1], err.args[0]))
2450
    else:
2451
      obuf.write("Failure: prerequisites not met for this"
2452
                 " operation:\n%s" % msg)
2453
  elif isinstance(err, errors.OpExecError):
2454
    obuf.write("Failure: command execution error:\n%s" % msg)
2455
  elif isinstance(err, errors.TagError):
2456
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2457
  elif isinstance(err, errors.JobQueueDrainError):
2458
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2459
               " accept new requests\n")
2460
  elif isinstance(err, errors.JobQueueFull):
2461
    obuf.write("Failure: the job queue is full and doesn't accept new"
2462
               " job submissions until old jobs are archived\n")
2463
  elif isinstance(err, errors.TypeEnforcementError):
2464
    obuf.write("Parameter Error: %s" % msg)
2465
  elif isinstance(err, errors.ParameterError):
2466
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2467
  elif isinstance(err, luxi.NoMasterError):
2468
    if err.args[0] == pathutils.MASTER_SOCKET:
2469
      daemon = "the master daemon"
2470
    elif err.args[0] == pathutils.QUERY_SOCKET:
2471
      daemon = "the config daemon"
2472
    else:
2473
      daemon = "socket '%s'" % str(err.args[0])
2474
    obuf.write("Cannot communicate with %s.\nIs the process running"
2475
               " and listening for connections?" % daemon)
2476
  elif isinstance(err, luxi.TimeoutError):
2477
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2478
               " been submitted and will continue to run even if the call"
2479
               " timed out. Useful commands in this situation are \"gnt-job"
2480
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2481
    obuf.write(msg)
2482
  elif isinstance(err, luxi.PermissionError):
2483
    obuf.write("It seems you don't have permissions to connect to the"
2484
               " master daemon.\nPlease retry as a different user.")
2485
  elif isinstance(err, luxi.ProtocolError):
2486
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2487
               "%s" % msg)
2488
  elif isinstance(err, errors.JobLost):
2489
    obuf.write("Error checking job status: %s" % msg)
2490
  elif isinstance(err, errors.QueryFilterParseError):
2491
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2492
    obuf.write("\n".join(err.GetDetails()))
2493
  elif isinstance(err, errors.GenericError):
2494
    obuf.write("Unhandled Ganeti error: %s" % msg)
2495
  elif isinstance(err, JobSubmittedException):
2496
    obuf.write("JobID: %s\n" % err.args[0])
2497
    retcode = 0
2498
  else:
2499
    obuf.write("Unhandled exception: %s" % msg)
2500
  return retcode, obuf.getvalue().rstrip("\n")
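
# Sketch of the usual calling pattern around FormatError (it mirrors what
# GenericMain does below):
#
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     retcode, message = FormatError(err)
#     ToStderr(message)
#     return retcode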
2501

    
2502

    
2503
def GenericMain(commands, override=None, aliases=None,
2504
                env_override=frozenset()):
2505
  """Generic main function for all the gnt-* commands.
2506

2507
  @param commands: a dictionary with a special structure, see the design doc
2508
                   for command line handling.
2509
  @param override: if not None, we expect a dictionary with keys that will
2510
                   override command line options; this can be used to pass
2511
                   options from the scripts to generic functions
2512
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2513
  @param env_override: list of environment names which are allowed to submit
2514
                       default args for commands
2515

2516
  """
2517
  # save the program name and the entire command line for later logging
2518
  if sys.argv:
2519
    binary = os.path.basename(sys.argv[0])
2520
    if not binary:
2521
      binary = sys.argv[0]
2522

    
2523
    if len(sys.argv) >= 2:
2524
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2525
    else:
2526
      logname = binary
2527

    
2528
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2529
  else:
2530
    binary = "<unknown program>"
2531
    cmdline = "<unknown>"
2532

    
2533
  if aliases is None:
2534
    aliases = {}
2535

    
2536
  try:
2537
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2538
                                       env_override)
2539
  except _ShowVersion:
2540
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2541
             constants.RELEASE_VERSION)
2542
    return constants.EXIT_SUCCESS
2543
  except _ShowUsage, err:
2544
    for line in _FormatUsage(binary, commands):
2545
      ToStdout(line)
2546

    
2547
    if err.exit_error:
2548
      return constants.EXIT_FAILURE
2549
    else:
2550
      return constants.EXIT_SUCCESS
2551
  except errors.ParameterError, err:
2552
    result, err_msg = FormatError(err)
2553
    ToStderr(err_msg)
2554
    return 1
2555

    
2556
  if func is None: # parse error
2557
    return 1
2558

    
2559
  if override is not None:
2560
    for key, val in override.iteritems():
2561
      setattr(options, key, val)
2562

    
2563
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2564
                     stderr_logging=True)
2565

    
2566
  logging.info("Command line: %s", cmdline)
2567

    
2568
  try:
2569
    result = func(options, args)
2570
  except (errors.GenericError, luxi.ProtocolError,
2571
          JobSubmittedException), err:
2572
    result, err_msg = FormatError(err)
2573
    logging.exception("Error during command processing")
2574
    ToStderr(err_msg)
2575
  except KeyboardInterrupt:
2576
    result = constants.EXIT_FAILURE
2577
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2578
             " might have been submitted and"
2579
             " will continue to run in the background.")
2580
  except IOError, err:
2581
    if err.errno == errno.EPIPE:
2582
      # our terminal went away, we'll exit
2583
      sys.exit(constants.EXIT_FAILURE)
2584
    else:
2585
      raise
2586

    
2587
  return result
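
# Rough sketch of how a gnt-* script wires its commands into GenericMain;
# the command name, function and argument spec below are placeholders:
#
#   commands = {
#     "info": (ShowInfo, ARGS_NONE, [], "", "Show some information"),
#     }
#
#   sys.exit(GenericMain(commands))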
2588

    
2589

    
2590
def ParseNicOption(optvalue):
2591
  """Parses the value of the --net option(s).
2592

2593
  """
2594
  try:
2595
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2596
  except (TypeError, ValueError), err:
2597
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2598
                               errors.ECODE_INVAL)
2599

    
2600
  nics = [{}] * nic_max
2601
  for nidx, ndict in optvalue:
2602
    nidx = int(nidx)
2603

    
2604
    if not isinstance(ndict, dict):
2605
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2606
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2607

    
2608
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2609

    
2610
    nics[nidx] = ndict
2611

    
2612
  return nics
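
# Illustration of ParseNicOption; the input mirrors what the "--net"
# option parser produces, i.e. (index, parameter-dict) pairs, and the
# values here are placeholders:
#
#   >>> ParseNicOption([("0", {"link": "br100"}), ("1", {"mode": "routed"})])
#   [{'link': 'br100'}, {'mode': 'routed'}]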
2613

    
2614

    
2615
def GenericInstanceCreate(mode, opts, args):
2616
  """Add an instance to the cluster via either creation or import.
2617

2618
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2619
  @param opts: the command line options selected by the user
2620
  @type args: list
2621
  @param args: should contain only one element, the new instance name
2622
  @rtype: int
2623
  @return: the desired exit code
2624

2625
  """
2626
  instance = args[0]
2627

    
2628
  (pnode, snode) = SplitNodeOption(opts.node)
2629

    
2630
  hypervisor = None
2631
  hvparams = {}
2632
  if opts.hypervisor:
2633
    hypervisor, hvparams = opts.hypervisor
2634

    
2635
  if opts.nics:
2636
    nics = ParseNicOption(opts.nics)
2637
  elif opts.no_nics:
2638
    # no nics
2639
    nics = []
2640
  elif mode == constants.INSTANCE_CREATE:
2641
    # default of one nic, all auto
2642
    nics = [{}]
2643
  else:
2644
    # mode == import
2645
    nics = []
2646

    
2647
  if opts.disk_template == constants.DT_DISKLESS:
2648
    if opts.disks or opts.sd_size is not None:
2649
      raise errors.OpPrereqError("Diskless instance but disk"
2650
                                 " information passed", errors.ECODE_INVAL)
2651
    disks = []
2652
  else:
2653
    if (not opts.disks and not opts.sd_size
2654
        and mode == constants.INSTANCE_CREATE):
2655
      raise errors.OpPrereqError("No disk information specified",
2656
                                 errors.ECODE_INVAL)
2657
    if opts.disks and opts.sd_size is not None:
2658
      raise errors.OpPrereqError("Please use either the '--disk' or"
2659
                                 " '-s' option", errors.ECODE_INVAL)
2660
    if opts.sd_size is not None:
2661
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2662

    
2663
    if opts.disks:
2664
      try:
2665
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2666
      except ValueError, err:
2667
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2668
                                   errors.ECODE_INVAL)
2669
      disks = [{}] * disk_max
2670
    else:
2671
      disks = []
2672
    for didx, ddict in opts.disks:
2673
      didx = int(didx)
2674
      if not isinstance(ddict, dict):
2675
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2676
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2677
      elif constants.IDISK_SIZE in ddict:
2678
        if constants.IDISK_ADOPT in ddict:
2679
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2680
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2681
        try:
2682
          ddict[constants.IDISK_SIZE] = \
2683
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2684
        except ValueError, err:
2685
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2686
                                     (didx, err), errors.ECODE_INVAL)
2687
      elif constants.IDISK_ADOPT in ddict:
2688
        if constants.IDISK_SPINDLES in ddict:
2689
          raise errors.OpPrereqError("spindles is not a valid option when"
2690
                                     " adopting a disk", errors.ECODE_INVAL)
2691
        if mode == constants.INSTANCE_IMPORT:
2692
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2693
                                     " import", errors.ECODE_INVAL)
2694
        ddict[constants.IDISK_SIZE] = 0
2695
      else:
2696
        raise errors.OpPrereqError("Missing size or adoption source for"
2697
                                   " disk %d" % didx, errors.ECODE_INVAL)
2698
      disks[didx] = ddict
2699

    
2700
  if opts.tags is not None:
2701
    tags = opts.tags.split(",")
2702
  else:
2703
    tags = []
2704

    
2705
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2706
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2707

    
2708
  if mode == constants.INSTANCE_CREATE:
2709
    start = opts.start
2710
    os_type = opts.os
2711
    force_variant = opts.force_variant
2712
    src_node = None
2713
    src_path = None
2714
    no_install = opts.no_install
2715
    identify_defaults = False
2716
    compress = constants.IEC_NONE
2717
  elif mode == constants.INSTANCE_IMPORT:
2718
    start = False
2719
    os_type = None
2720
    force_variant = False
2721
    src_node = opts.src_node
2722
    src_path = opts.src_dir
2723
    no_install = None
2724
    identify_defaults = opts.identify_defaults
2725
    compress = opts.compress
2726
  else:
2727
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2728

    
2729
  op = opcodes.OpInstanceCreate(instance_name=instance,
2730
                                disks=disks,
2731
                                disk_template=opts.disk_template,
2732
                                nics=nics,
2733
                                conflicts_check=opts.conflicts_check,
2734
                                pnode=pnode, snode=snode,
2735
                                ip_check=opts.ip_check,
2736
                                name_check=opts.name_check,
2737
                                wait_for_sync=opts.wait_for_sync,
2738
                                file_storage_dir=opts.file_storage_dir,
2739
                                file_driver=opts.file_driver,
2740
                                iallocator=opts.iallocator,
2741
                                hypervisor=hypervisor,
2742
                                hvparams=hvparams,
2743
                                beparams=opts.beparams,
2744
                                osparams=opts.osparams,
2745
                                mode=mode,
2746
                                start=start,
2747
                                os_type=os_type,
2748
                                force_variant=force_variant,
2749
                                src_node=src_node,
2750
                                src_path=src_path,
2751
                                compress=compress,
2752
                                tags=tags,
2753
                                no_install=no_install,
2754
                                identify_defaults=identify_defaults,
2755
                                ignore_ipolicy=opts.ignore_ipolicy)
2756

    
2757
  SubmitOrSend(op, opts)
2758
  return 0
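
# Sketch of how instance "add" and "import" commands typically delegate
# to GenericInstanceCreate (the function names are illustrative):
#
#   def AddInstance(opts, args):
#     return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args)
#
#   def ImportInstance(opts, args):
#     return GenericInstanceCreate(constants.INSTANCE_IMPORT, opts, args)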
2759

    
2760

    
2761
class _RunWhileClusterStoppedHelper:
2762
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2763

2764
  """
2765
  def __init__(self, feedback_fn, cluster_name, master_node,
2766
               online_nodes, ssh_ports):
2767
    """Initializes this class.
2768

2769
    @type feedback_fn: callable
2770
    @param feedback_fn: Feedback function
2771
    @type cluster_name: string
2772
    @param cluster_name: Cluster name
2773
    @type master_node: string
2774
    @param master_node: Master node name
2775
    @type online_nodes: list
2776
    @param online_nodes: List of names of online nodes
2777
    @type ssh_ports: list
2778
    @param ssh_ports: List of SSH ports of online nodes
2779

2780
    """
2781
    self.feedback_fn = feedback_fn
2782
    self.cluster_name = cluster_name
2783
    self.master_node = master_node
2784
    self.online_nodes = online_nodes
2785
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))
2786

    
2787
    self.ssh = ssh.SshRunner(self.cluster_name)
2788

    
2789
    self.nonmaster_nodes = [name for name in online_nodes
2790
                            if name != master_node]
2791

    
2792
    assert self.master_node not in self.nonmaster_nodes
2793

    
2794
  def _RunCmd(self, node_name, cmd):
2795
    """Runs a command on the local or a remote machine.
2796

2797
    @type node_name: string
2798
    @param node_name: Machine name
2799
    @type cmd: list
2800
    @param cmd: Command
2801

2802
    """
2803
    if node_name is None or node_name == self.master_node:
2804
      # No need to use SSH
2805
      result = utils.RunCmd(cmd)
2806
    else:
2807
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2808
                            utils.ShellQuoteArgs(cmd),
2809
                            port=self.ssh_ports[node_name])
2810

    
2811
    if result.failed:
2812
      errmsg = ["Failed to run command %s" % result.cmd]
2813
      if node_name:
2814
        errmsg.append("on node %s" % node_name)
2815
      errmsg.append(": exitcode %s and error %s" %
2816
                    (result.exit_code, result.output))
2817
      raise errors.OpExecError(" ".join(errmsg))
2818

    
2819
  def Call(self, fn, *args):
2820
    """Call function while all daemons are stopped.
2821

2822
    @type fn: callable
2823
    @param fn: Function to be called
2824

2825
    """
2826
    # Pause watcher by acquiring an exclusive lock on watcher state file
2827
    self.feedback_fn("Blocking watcher")
2828
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2829
    try:
2830
      # TODO: Currently, this just blocks. There's no timeout.
2831
      # TODO: Should it be a shared lock?
2832
      watcher_block.Exclusive(blocking=True)
2833

    
2834
      # Stop master daemons, so that no new jobs can come in and all running
2835
      # ones are finished
2836
      self.feedback_fn("Stopping master daemons")
2837
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2838
      try:
2839
        # Stop daemons on all nodes
2840
        for node_name in self.online_nodes:
2841
          self.feedback_fn("Stopping daemons on %s" % node_name)
2842
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2843

    
2844
        # All daemons are shut down now
2845
        try:
2846
          return fn(self, *args)
2847
        except Exception, err:
2848
          _, errmsg = FormatError(err)
2849
          logging.exception("Caught exception")
2850
          self.feedback_fn(errmsg)
2851
          raise
2852
      finally:
2853
        # Start cluster again, master node last
2854
        for node_name in self.nonmaster_nodes + [self.master_node]:
2855
          self.feedback_fn("Starting daemons on %s" % node_name)
2856
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2857
    finally:
2858
      # Resume watcher
2859
      watcher_block.Close()
2860

    
2861

    
2862
def RunWhileClusterStopped(feedback_fn, fn, *args):
2863
  """Calls a function while all cluster daemons are stopped.
2864

2865
  @type feedback_fn: callable
2866
  @param feedback_fn: Feedback function
2867
  @type fn: callable
2868
  @param fn: Function to be called when daemons are stopped
2869

2870
  """
2871
  feedback_fn("Gathering cluster information")
2872

    
2873
  # This ensures we're running on the master daemon
2874
  cl = GetClient()
2875
  # Query client
2876
  qcl = GetClient(query=True)
2877

    
2878
  (cluster_name, master_node) = \
2879
    cl.QueryConfigValues(["cluster_name", "master_node"])
2880

    
2881
  online_nodes = GetOnlineNodes([], cl=qcl)
2882
  ssh_ports = GetNodesSshPorts(online_nodes, qcl)
2883

    
2884
  # Don't keep a reference to the client. The master daemon will go away.
2885
  del cl
2886
  del qcl
2887

    
2888
  assert master_node in online_nodes
2889

    
2890
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2891
                                       online_nodes, ssh_ports).Call(fn, *args)
2892

    
2893

    
2894
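# Illustrative sketch, not part of the original module: the callable passed to
# RunWhileClusterStopped receives the helper object as its first argument, so
# it can reach feedback_fn (and run commands on the nodes) while every daemon
# is down. The task below merely reports the non-master nodes; it is an
# assumption made for this example, not an existing Ganeti task.
def _ExampleOfflineTask(helper):
  """Example task executed while all cluster daemons are stopped."""
  helper.feedback_fn("Non-master nodes: %s" %
                     utils.CommaJoin(helper.nonmaster_nodes))
  return len(helper.nonmaster_nodes)

# Typical call site (sketch):
#   RunWhileClusterStopped(ToStdout, _ExampleOfflineTask)

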
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


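# Illustrative sketch, not part of the original module: how GenerateTable is
# typically driven. With separator=None the "smart" width algorithm is used,
# and the "size" field is right-aligned and rendered with human-readable
# units ("h"). The field names and values below are made up for the example.
def _ExampleGenerateTable():
  """Returns a small formatted table as a list of lines."""
  headers = {"name": "Node", "size": "Size"}
  fields = ["name", "size"]
  data = [["node1.example.com", 10240], ["node2.example.com", 2048]]
  return GenerateTable(headers, fields, None, data,
                       numfields=["size"], unitfields=["size"])

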
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


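# Illustrative sketch, not part of the original module: a list-style CLI
# command usually just forwards its parsed options to GenericList. The option
# attribute names below (units, separator, no_headers, verbose) mirror the
# common CLI options defined in this module, but as far as this example is
# concerned they are assumptions.
def _ExampleListNodes(opts, args):
  """Sketch: list nodes through the generic query machinery."""
  fields = ["name", "dtotal", "dfree"]
  return GenericList(constants.QR_NODE, fields, args, opts.units,
                     opts.separator, not opts.no_headers,
                     verbose=opts.verbose, namefield="name")

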
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


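# Illustrative sketch, not part of the original module: the companion
# "list-fields" commands are thin wrappers around GenericListFields, along
# these lines (option attribute names are assumptions for the example):
def _ExampleListNodeFields(opts, args):
  """Sketch: list the queryable node fields."""
  return GenericListFields(constants.QR_NODE, args, opts.separator,
                           not opts.no_headers)

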
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


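# Illustrative sketch, not part of the original module: TableColumn pairs a
# header with a per-value formatting callback, and FormatTable pads the
# columns unless a separator is given. The data below is made up.
def _ExampleFormatTable():
  """Returns example rows formatted as an aligned table."""
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Memory", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["instance1", 1024], ["instance2", 8192]]
  return FormatTable(rows, columns, True, None)

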
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


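# Illustrative examples, not part of the original module, of what ParseTimespec
# accepts and returns (a bare suffix such as "h" raises OpPrereqError):
def _ExampleParseTimespec():
  """Sketch: a few ParseTimespec conversions."""
  assert ParseTimespec("30") == 30        # no suffix: plain seconds
  assert ParseTimespec("30m") == 30 * 60
  assert ParseTimespec("2h") == 2 * 3600
  assert ParseTimespec("1w") == 604800

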
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that were skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient(query=True)

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{Client}
  @return: the list of SSH ports corresponding to the nodes
  @rtype: a list of integers
  """
  return map(lambda t: t[0],
             cl.QueryNodes(names=nodes,
                           fields=["ndp/ssh_port"],
                           use_locking=False))


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


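# Illustrative sketch, not part of the original module: the usual JobExecutor
# flow is to queue one job per target, submit them all, and then wait for the
# results. The opcode and the instance names below are assumptions made for
# this example only.
def _ExampleStartManyInstances(opts, instance_names):
  """Sketch: start several instances as parallel jobs."""
  jex = JobExecutor(opts=opts)
  for name in instance_names:
    op = opcodes.OpInstanceStartup(instance_name=name, force=False)
    jex.QueueJob(name, op)
  results = jex.GetResults()
  # results is a list of (success, payload) tuples, in submission order
  return compat.all(success for (success, _) in results)

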
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


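# Illustrative sketch, not part of the original module: FormatParamsDictInfo
# marks inherited values explicitly, which is how "info"-style output
# distinguishes overridden parameters from defaults. The parameter names and
# values below are made up.
def _ExampleFormatParamsDictInfo():
  """Sketch: custom values are shown as-is, inherited ones as "default (...)"."""
  custom = {"memory": 512}
  effective = {"memory": 512, "vcpus": 2}
  return FormatParamsDictInfo(custom, effective)
  # -> {"memory": "512", "vcpus": "default (2)"}

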
def _FormatListInfoDefault(data, def_data):
  """Formats a list value, falling back to the given defaults.

  """
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for