#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from ganeti.runtime import (GetClient)

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "COMPRESS_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
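
# Illustrative note (editor addition, not part of upstream Ganeti): check_unit
# backs the "unit" option type defined further below and delegates to
# utils.ParseUnit, which is expected to normalize size strings to MiB, e.g.
#   check_unit(None, "--os-size", "512")  => 512
#   check_unit(None, "--os-size", "1G")   => 1024
# Malformed values are reported through OptionValueError.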


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
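
# Illustrative example (editor addition, not part of upstream Ganeti): with
# prefix parsing enabled, a value like "mem=512,no_auto_balance,-maxmem" is
# expected to be split as
#   _SplitKeyVal("-B", "mem=512,no_auto_balance,-maxmem", True)
#   => {"mem": "512", "auto_balance": False, "maxmem": None}
# ("-B" is only an example option name, used for error messages).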


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
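
# Illustrative example (editor addition, not part of upstream Ganeti): the
# usual "ident:key=val" form yields an (ident, dict) tuple, e.g.
#   _SplitIdentKeyVal("--disk", "0:size=10G,mode=rw", False)
#   => ("0", {"size": "10G", "mode": "rw"})
# while with parse_prefixes=True an identifier such as "no_link" (with no
# options) yields ("link", False), i.e. "remove this parameter group".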


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
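
# Illustrative example (editor addition, not part of upstream Ganeti): "//"
# separates list entries and "/" separates ident groups within one entry, so
#   check_multilist_ident_key_val(None, "--opt", "a:x=1/b:y=2//a:z=3")
#   => [{"a": {"x": "1"}, "b": {"y": "2"}}, {"a": {"z": "3"}}]
# ("--opt" is only an example option name, used for error messages).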


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
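
# Illustrative example (editor addition, not part of upstream Ganeti),
# assuming constants.VALUE_DEFAULT is the string "default": the "maybefloat"
# type keeps that sentinel as-is and converts everything else to a float, e.g.
#   check_maybefloat(None, "--ipolicy-vcpu-ratio", "4")        => 4.0
#   check_maybefloat(None, "--ipolicy-vcpu-ratio", "default")  => "default"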


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible, fall back"
                                     " to failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (as during the"
                         " migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

COMPRESS_OPT = cli_option("--compress", dest="compress",
                          default=constants.IEC_NONE,
                          help="The compression mode to use",
                          choices=list(constants.IEC_ALL))

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

1409
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1410
                                       dest="cluster_domain_secret",
1411
                                       default=None,
1412
                                       help=("Load new new cluster domain"
1413
                                             " secret from file"))
1414

    
1415
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1416
                                           dest="new_cluster_domain_secret",
1417
                                           default=False, action="store_true",
1418
                                           help=("Create a new cluster domain"
1419
                                                 " secret"))
1420

    
1421
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1422
                              dest="use_replication_network",
1423
                              help="Whether to use the replication network"
1424
                              " for talking to the nodes",
1425
                              action="store_true", default=False)
1426

    
1427
MAINTAIN_NODE_HEALTH_OPT = \
1428
    cli_option("--maintain-node-health", dest="maintain_node_health",
1429
               metavar=_YORNO, default=None, type="bool",
1430
               help="Configure the cluster to automatically maintain node"
1431
               " health, by shutting down unknown instances, shutting down"
1432
               " unknown DRBD devices, etc.")
1433

    
1434
IDENTIFY_DEFAULTS_OPT = \
1435
    cli_option("--identify-defaults", dest="identify_defaults",
1436
               default=False, action="store_true",
1437
               help="Identify which saved instance parameters are equal to"
1438
               " the current cluster defaults and set them as such, instead"
1439
               " of marking them as overridden")
1440

    
1441
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1442
                         action="store", dest="uid_pool",
1443
                         help=("A list of user-ids or user-id"
1444
                               " ranges separated by commas"))
1445

    
1446
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1447
                          action="store", dest="add_uids",
1448
                          help=("A list of user-ids or user-id"
1449
                                " ranges separated by commas, to be"
1450
                                " added to the user-id pool"))
1451

    
1452
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1453
                             action="store", dest="remove_uids",
1454
                             help=("A list of user-ids or user-id"
1455
                                   " ranges separated by commas, to be"
1456
                                   " removed from the user-id pool"))
1457

    
1458
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1459
                              action="store", dest="reserved_lvs",
1460
                              help=("A comma-separated list of reserved"
1461
                                    " logical volumes names, that will be"
1462
                                    " ignored by cluster verify"))
1463

    
1464
ROMAN_OPT = cli_option("--roman",
1465
                       dest="roman_integers", default=False,
1466
                       action="store_true",
1467
                       help="Use roman numbers for positive integers")
1468

    
1469
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1470
                             action="store", default=None,
1471
                             help="Specifies usermode helper for DRBD")
1472

    
1473
PRIMARY_IP_VERSION_OPT = \
1474
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1475
               action="store", dest="primary_ip_version",
1476
               metavar="%d|%d" % (constants.IP4_VERSION,
1477
                                  constants.IP6_VERSION),
1478
               help="Cluster-wide IP version for primary IP")
1479

    
1480
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1481
                              action="store_true",
1482
                              help="Show machine name for every line in output")
1483

    
1484
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1485
                              action="store_true",
1486
                              help=("Hide successful results and show failures"
1487
                                    " only (determined by the exit code)"))
1488

    
1489
REASON_OPT = cli_option("--reason", default=None,
1490
                        help="The reason for executing the command")
1491

    
1492

    
1493
def _PriorityOptionCb(option, _, value, parser):
1494
  """Callback for processing C{--priority} option.
1495

1496
  """
1497
  value = _PRIONAME_TO_VALUE[value]
1498

    
1499
  setattr(parser.values, option.dest, value)
1500

    
1501

    
1502
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1503
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1504
                          choices=_PRIONAME_TO_VALUE.keys(),
1505
                          action="callback", type="choice",
1506
                          callback=_PriorityOptionCb,
1507
                          help="Priority for opcode processing")
1508
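# For illustration (a sketch; _PRIORITY_NAMES and _PRIONAME_TO_VALUE are
# defined earlier in this module): passing a priority name such as "high" on
# the command line makes optparse invoke _PriorityOptionCb, which replaces the
# raw string with the corresponding numeric value from _PRIONAME_TO_VALUE
# before storing it in options.priority.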

    
1509
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1510
                        type="bool", default=None, metavar=_YORNO,
1511
                        help="Sets the hidden flag on the OS")
1512

    
1513
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1514
                        type="bool", default=None, metavar=_YORNO,
1515
                        help="Sets the blacklisted flag on the OS")
1516

    
1517
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1518
                                     type="bool", metavar=_YORNO,
1519
                                     dest="prealloc_wipe_disks",
1520
                                     help=("Wipe disks prior to instance"
1521
                                           " creation"))
1522

    
1523
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1524
                             type="keyval", default=None,
1525
                             help="Node parameters")
1526

    
1527
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1528
                              action="store", metavar="POLICY", default=None,
1529
                              help="Allocation policy for the node group")
1530

    
1531
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1532
                              type="bool", metavar=_YORNO,
1533
                              dest="node_powered",
1534
                              help="Specify if the SoR for node is powered")
1535

    
1536
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1537
                             default=constants.OOB_TIMEOUT,
1538
                             help="Maximum time to wait for out-of-band helper")
1539

    
1540
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1541
                             default=constants.OOB_POWER_DELAY,
1542
                             help="Time in seconds to wait between power-ons")
1543

    
1544
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1545
                              action="store_true", default=False,
1546
                              help=("Whether command argument should be treated"
1547
                                    " as filter"))
1548

    
1549
NO_REMEMBER_OPT = cli_option("--no-remember",
1550
                             dest="no_remember",
1551
                             action="store_true", default=False,
1552
                             help="Perform but do not record the change"
1553
                             " in the configuration")
1554

    
1555
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1556
                              default=False, action="store_true",
1557
                              help="Evacuate primary instances only")
1558

    
1559
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1560
                                default=False, action="store_true",
1561
                                help="Evacuate secondary instances only"
1562
                                     " (applies only to internally mirrored"
1563
                                     " disk templates, e.g. %s)" %
1564
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1565

    
1566
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1567
                                action="store_true", default=False,
1568
                                help="Pause instance at startup")
1569

    
1570
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1571
                          help="Destination node group (name or uuid)",
1572
                          default=None, action="append",
1573
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1574

    
1575
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1576
                               action="append", dest="ignore_errors",
1577
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1578
                               help="Error code to be ignored")
1579

    
1580
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1581
                            action="append",
1582
                            help=("Specify disk state information in the"
1583
                                  " format"
1584
                                  " storage_type/identifier:option=value,...;"
1585
                                  " note this is unused for now"),
1586
                            type="identkeyval")
1587

    
1588
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1589
                          action="append",
1590
                          help=("Specify hypervisor state information in the"
1591
                                " format hypervisor:option=value,...;"
1592
                                " note this is unused for now"),
1593
                          type="identkeyval")
1594

    
1595
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1596
                                action="store_true", default=False,
1597
                                help="Ignore instance policy violations")
1598

    
1599
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1600
                             help="Sets the instance's runtime memory,"
1601
                             " ballooning it up or down to the new value",
1602
                             default=None, type="unit", metavar="<size>")
1603

    
1604
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1605
                          action="store_true", default=False,
1606
                          help="Marks the grow as absolute instead of the"
1607
                          " (default) relative mode")
1608

    
1609
NETWORK_OPT = cli_option("--network",
1610
                         action="store", default=None, dest="network",
1611
                         help="IP network in CIDR notation")
1612

    
1613
GATEWAY_OPT = cli_option("--gateway",
1614
                         action="store", default=None, dest="gateway",
1615
                         help="IP address of the router (gateway)")
1616

    
1617
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1618
                                  action="store", default=None,
1619
                                  dest="add_reserved_ips",
1620
                                  help="Comma-separated list of"
1621
                                  " reserved IPs to add")
1622

    
1623
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1624
                                     action="store", default=None,
1625
                                     dest="remove_reserved_ips",
1626
                                     help="Comma-delimited list of"
1627
                                     " reserved IPs to remove")
1628

    
1629
NETWORK6_OPT = cli_option("--network6",
1630
                          action="store", default=None, dest="network6",
1631
                          help="IP network in CIDR notation")
1632

    
1633
GATEWAY6_OPT = cli_option("--gateway6",
1634
                          action="store", default=None, dest="gateway6",
1635
                          help="IP6 address of the router (gateway)")
1636

    
1637
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1638
                                  dest="conflicts_check",
1639
                                  default=True,
1640
                                  action="store_false",
1641
                                  help="Don't check for conflicting IPs")
1642

    
1643
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1644
                                 default=False, action="store_true",
1645
                                 help="Include default values")
1646

    
1647
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1648
                         action="store_true", default=False,
1649
                         help="Hotplug supported devices (NICs and Disks)")
1650

    
1651
#: Options provided by all commands
1652
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1653

    
1654
# options related to asynchronous job handling
1655

    
1656
SUBMIT_OPTS = [
1657
  SUBMIT_OPT,
1658
  PRINT_JOBID_OPT,
1659
  ]
1660

    
1661
# common options for creating instances. "add" and "import" then add their own
1662
# specific ones.
1663
COMMON_CREATE_OPTS = [
1664
  BACKEND_OPT,
1665
  DISK_OPT,
1666
  DISK_TEMPLATE_OPT,
1667
  FILESTORE_DIR_OPT,
1668
  FILESTORE_DRIVER_OPT,
1669
  HYPERVISOR_OPT,
1670
  IALLOCATOR_OPT,
1671
  NET_OPT,
1672
  NODE_PLACEMENT_OPT,
1673
  NOIPCHECK_OPT,
1674
  NOCONFLICTSCHECK_OPT,
1675
  NONAMECHECK_OPT,
1676
  NONICS_OPT,
1677
  NWSYNC_OPT,
1678
  OSPARAMS_OPT,
1679
  OS_SIZE_OPT,
1680
  SUBMIT_OPT,
1681
  PRINT_JOBID_OPT,
1682
  TAG_ADD_OPT,
1683
  DRY_RUN_OPT,
1684
  PRIORITY_OPT,
1685
  ]
1686

    
1687
# common instance policy options
1688
INSTANCE_POLICY_OPTS = [
1689
  IPOLICY_BOUNDS_SPECS_OPT,
1690
  IPOLICY_DISK_TEMPLATES,
1691
  IPOLICY_VCPU_RATIO,
1692
  IPOLICY_SPINDLE_RATIO,
1693
  ]
1694

    
1695
# instance policy split specs options
1696
SPLIT_ISPECS_OPTS = [
1697
  SPECS_CPU_COUNT_OPT,
1698
  SPECS_DISK_COUNT_OPT,
1699
  SPECS_DISK_SIZE_OPT,
1700
  SPECS_MEM_SIZE_OPT,
1701
  SPECS_NIC_COUNT_OPT,
1702
  ]
1703

    
1704

    
1705
class _ShowUsage(Exception):
1706
  """Exception class for L{_ParseArgs}.
1707

1708
  """
1709
  def __init__(self, exit_error):
1710
    """Initializes instances of this class.
1711

1712
    @type exit_error: bool
1713
    @param exit_error: Whether to report failure on exit
1714

1715
    """
1716
    Exception.__init__(self)
1717
    self.exit_error = exit_error
1718

    
1719

    
1720
class _ShowVersion(Exception):
1721
  """Exception class for L{_ParseArgs}.
1722

1723
  """
1724

    
1725

    
1726
def _ParseArgs(binary, argv, commands, aliases, env_override):
1727
  """Parser for the command line arguments.
1728

1729
  This function parses the arguments and returns the function which
1730
  must be executed together with its (modified) arguments.
1731

1732
  @param binary: Script name
1733
  @param argv: Command line arguments
1734
  @param commands: Dictionary containing command definitions
1735
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1736
  @param env_override: list of env variables allowed for default args
1737
  @raise _ShowUsage: If usage description should be shown
1738
  @raise _ShowVersion: If version should be shown
1739

1740
  """
1741
  assert not (env_override - set(commands))
1742
  assert not (set(aliases.keys()) & set(commands.keys()))
1743

    
1744
  if len(argv) > 1:
1745
    cmd = argv[1]
1746
  else:
1747
    # No option or command given
1748
    raise _ShowUsage(exit_error=True)
1749

    
1750
  if cmd == "--version":
1751
    raise _ShowVersion()
1752
  elif cmd == "--help":
1753
    raise _ShowUsage(exit_error=False)
1754
  elif not (cmd in commands or cmd in aliases):
1755
    raise _ShowUsage(exit_error=True)
1756

    
1757
  # get command, unalias it, and look it up in commands
1758
  if cmd in aliases:
1759
    if aliases[cmd] not in commands:
1760
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1761
                                   " command '%s'" % (cmd, aliases[cmd]))
1762

    
1763
    cmd = aliases[cmd]
1764

    
1765
  if cmd in env_override:
1766
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1767
    env_args = os.environ.get(args_env_name)
1768
    if env_args:
1769
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1770

    
1771
  func, args_def, parser_opts, usage, description = commands[cmd]
1772
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1773
                        description=description,
1774
                        formatter=TitledHelpFormatter(),
1775
                        usage="%%prog %s %s" % (cmd, usage))
1776
  parser.disable_interspersed_args()
1777
  options, args = parser.parse_args(args=argv[2:])
1778

    
1779
  if not _CheckArguments(cmd, args_def, args):
1780
    return None, None, None
1781

    
1782
  return func, options, args
1783
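# Example (illustrative sketch, not taken from a real client): _ParseArgs
# expects "commands" to map each command name to a 5-tuple of (function,
# argument definitions, extra options, usage string, description), e.g.
#
#   commands = {
#     "list": (PrintList, [ArgInstance(min=0)], [FIELDS_OPT],
#              "[<instance>...]", "Lists instances"),
#     }
#
# With env_override containing "list" and a binary named "gnt-foo", default
# arguments would additionally be read from the GNT_FOO_LIST environment
# variable and spliced into argv right after the command name.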

    
1784

    
1785
def _FormatUsage(binary, commands):
1786
  """Generates a nice description of all commands.
1787

1788
  @param binary: Script name
1789
  @param commands: Dictionary containing command definitions
1790

1791
  """
1792
  # compute the column width for the command names (at most 60 characters)
1793
  mlen = min(60, max(map(len, commands)))
1794

    
1795
  yield "Usage: %s {command} [options...] [argument...]" % binary
1796
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1797
  yield ""
1798
  yield "Commands:"
1799

    
1800
  # and format a nice command list
1801
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1802
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1803
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1804
    for line in help_lines:
1805
      yield " %-*s   %s" % (mlen, "", line)
1806

    
1807
  yield ""
1808

    
1809

    
1810
def _CheckArguments(cmd, args_def, args):
1811
  """Verifies the arguments using the argument definition.
1812

1813
  Algorithm:
1814

1815
    1. Abort with error if values specified by user but none expected.
1816

1817
    1. For each argument in definition
1818

1819
      1. Keep running count of minimum number of values (min_count)
1820
      1. Keep running count of maximum number of values (max_count)
1821
      1. If it has an unlimited number of values
1822

1823
        1. Abort with error if it's not the last argument in the definition
1824

1825
    1. If last argument has limited number of values
1826

1827
      1. Abort with error if number of values doesn't match or is too large
1828

1829
    1. Abort with error if user didn't pass enough values (min_count)
1830

1831
  """
1832
  if args and not args_def:
1833
    ToStderr("Error: Command %s expects no arguments", cmd)
1834
    return False
1835

    
1836
  min_count = None
1837
  max_count = None
1838
  check_max = None
1839

    
1840
  last_idx = len(args_def) - 1
1841

    
1842
  for idx, arg in enumerate(args_def):
1843
    if min_count is None:
1844
      min_count = arg.min
1845
    elif arg.min is not None:
1846
      min_count += arg.min
1847

    
1848
    if max_count is None:
1849
      max_count = arg.max
1850
    elif arg.max is not None:
1851
      max_count += arg.max
1852

    
1853
    if idx == last_idx:
1854
      check_max = (arg.max is not None)
1855

    
1856
    elif arg.max is None:
1857
      raise errors.ProgrammerError("Only the last argument can have max=None")
1858

    
1859
  if check_max:
1860
    # Command with exact number of arguments
1861
    if (min_count is not None and max_count is not None and
1862
        min_count == max_count and len(args) != min_count):
1863
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1864
      return False
1865

    
1866
    # Command with limited number of arguments
1867
    if max_count is not None and len(args) > max_count:
1868
      ToStderr("Error: Command %s expects only %d argument(s)",
1869
               cmd, max_count)
1870
      return False
1871

    
1872
  # Command with some required arguments
1873
  if min_count is not None and len(args) < min_count:
1874
    ToStderr("Error: Command %s expects at least %d argument(s)",
1875
             cmd, min_count)
1876
    return False
1877

    
1878
  return True
1879
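# For illustration (using the Arg* argument classes defined earlier in this
# module): with args_def = [ArgInstance(min=1, max=1), ArgFile(min=0, max=None)]
# a command accepts exactly one instance name followed by any number of file
# names; _CheckArguments() prints an error and returns False when the instance
# name is missing, and raises ProgrammerError if an argument with max=None is
# not the last one in the definition.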

    
1880

    
1881
def SplitNodeOption(value):
1882
  """Splits the value of a --node option.
1883

1884
  """
1885
  if value and ":" in value:
1886
    return value.split(":", 1)
1887
  else:
1888
    return (value, None)
1889
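# For illustration: SplitNodeOption("node1.example.com:node2.example.com")
# returns ("node1.example.com", "node2.example.com"), while
# SplitNodeOption("node1.example.com") returns ("node1.example.com", None).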

    
1890

    
1891
def CalculateOSNames(os_name, os_variants):
1892
  """Calculates all the names an OS can be called, according to its variants.
1893

1894
  @type os_name: string
1895
  @param os_name: base name of the os
1896
  @type os_variants: list or None
1897
  @param os_variants: list of supported variants
1898
  @rtype: list
1899
  @return: list of valid names
1900

1901
  """
1902
  if os_variants:
1903
    return ["%s+%s" % (os_name, v) for v in os_variants]
1904
  else:
1905
    return [os_name]
1906
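# For illustration: CalculateOSNames("debootstrap", ["default", "minimal"])
# returns ["debootstrap+default", "debootstrap+minimal"]; with no variants the
# result is just ["debootstrap"].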

    
1907

    
1908
def ParseFields(selected, default):
1909
  """Parses the values of "--field"-like options.
1910

1911
  @type selected: string or None
1912
  @param selected: User-selected options
1913
  @type default: list
1914
  @param default: Default fields
1915

1916
  """
1917
  if selected is None:
1918
    return default
1919

    
1920
  if selected.startswith("+"):
1921
    return default + selected[1:].split(",")
1922

    
1923
  return selected.split(",")
1924
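# For illustration, with default fields ["name", "status"]:
#   ParseFields(None, ["name", "status"])        -> ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"]) -> ["name", "status", "oper_ram"]
#   ParseFields("name,os", ["name", "status"])   -> ["name", "os"]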

    
1925

    
1926
UsesRPC = rpc.RunWithRPC
1927

    
1928

    
1929
def AskUser(text, choices=None):
1930
  """Ask the user a question.
1931

1932
  @param text: the question to ask
1933

1934
  @param choices: list with elements tuples (input_char, return_value,
1935
      description); if not given, it will default to: [('y', True,
1936
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1937
      note that the '?' char is reserved for help
1938

1939
  @return: one of the return values from the choices list; if input is
1940
      not possible (i.e. not running with a tty), we return the last
1941
      entry from the list
1942

1943
  """
1944
  if choices is None:
1945
    choices = [("y", True, "Perform the operation"),
1946
               ("n", False, "Do not perform the operation")]
1947
  if not choices or not isinstance(choices, list):
1948
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1949
  for entry in choices:
1950
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1951
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1952

    
1953
  answer = choices[-1][1]
1954
  new_text = []
1955
  for line in text.splitlines():
1956
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1957
  text = "\n".join(new_text)
1958
  try:
1959
    f = file("/dev/tty", "a+")
1960
  except IOError:
1961
    return answer
1962
  try:
1963
    chars = [entry[0] for entry in choices]
1964
    chars[-1] = "[%s]" % chars[-1]
1965
    chars.append("?")
1966
    maps = dict([(entry[0], entry[1]) for entry in choices])
1967
    while True:
1968
      f.write(text)
1969
      f.write("\n")
1970
      f.write("/".join(chars))
1971
      f.write(": ")
1972
      line = f.readline(2).strip().lower()
1973
      if line in maps:
1974
        answer = maps[line]
1975
        break
1976
      elif line == "?":
1977
        for entry in choices:
1978
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1979
        f.write("\n")
1980
        continue
1981
  finally:
1982
    f.close()
1983
  return answer
1984
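# Example (sketch): a three-way prompt can be built by passing an explicit
# choices list, e.g.
#   choices = [("y", True, "Perform the operation"),
#              ("a", "all", "Perform the operation for all remaining items"),
#              ("n", False, "Do not perform the operation")]
#   answer = AskUser("Remove the instance?", choices)
# Without a controlling tty, AskUser() returns the last entry's value (False
# here) instead of prompting.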

    
1985

    
1986
class JobSubmittedException(Exception):
1987
  """Job was submitted, client should exit.
1988

1989
  This exception has one argument, the ID of the job that was
1990
  submitted. The handler should print this ID.
1991

1992
  This is not an error, just a structured way to exit from clients.
1993

1994
  """
1995

    
1996

    
1997
def SendJob(ops, cl=None):
1998
  """Function to submit an opcode without waiting for the results.
1999

2000
  @type ops: list
2001
  @param ops: list of opcodes
2002
  @type cl: luxi.Client
2003
  @param cl: the luxi client to use for communicating with the master;
2004
             if None, a new client will be created
2005

2006
  """
2007
  if cl is None:
2008
    cl = GetClient()
2009

    
2010
  job_id = cl.SubmitJob(ops)
2011

    
2012
  return job_id
2013

    
2014

    
2015
def GenericPollJob(job_id, cbs, report_cbs):
2016
  """Generic job-polling function.
2017

2018
  @type job_id: number
2019
  @param job_id: Job ID
2020
  @type cbs: Instance of L{JobPollCbBase}
2021
  @param cbs: Data callbacks
2022
  @type report_cbs: Instance of L{JobPollReportCbBase}
2023
  @param report_cbs: Reporting callbacks
2024

2025
  """
2026
  prev_job_info = None
2027
  prev_logmsg_serial = None
2028

    
2029
  status = None
2030

    
2031
  while True:
2032
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2033
                                      prev_logmsg_serial)
2034
    if not result:
2035
      # job not found, go away!
2036
      raise errors.JobLost("Job with id %s lost" % job_id)
2037

    
2038
    if result == constants.JOB_NOTCHANGED:
2039
      report_cbs.ReportNotChanged(job_id, status)
2040

    
2041
      # Wait again
2042
      continue
2043

    
2044
    # Split result, a tuple of (field values, log entries)
2045
    (job_info, log_entries) = result
2046
    (status, ) = job_info
2047

    
2048
    if log_entries:
2049
      for log_entry in log_entries:
2050
        (serial, timestamp, log_type, message) = log_entry
2051
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2052
                                    log_type, message)
2053
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2054

    
2055
    # TODO: Handle canceled and archived jobs
2056
    elif status in (constants.JOB_STATUS_SUCCESS,
2057
                    constants.JOB_STATUS_ERROR,
2058
                    constants.JOB_STATUS_CANCELING,
2059
                    constants.JOB_STATUS_CANCELED):
2060
      break
2061

    
2062
    prev_job_info = job_info
2063

    
2064
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2065
  if not jobs:
2066
    raise errors.JobLost("Job with id %s lost" % job_id)
2067

    
2068
  status, opstatus, result = jobs[0]
2069

    
2070
  if status == constants.JOB_STATUS_SUCCESS:
2071
    return result
2072

    
2073
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2074
    raise errors.OpExecError("Job was canceled")
2075

    
2076
  has_ok = False
2077
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2078
    if status == constants.OP_STATUS_SUCCESS:
2079
      has_ok = True
2080
    elif status == constants.OP_STATUS_ERROR:
2081
      errors.MaybeRaise(msg)
2082

    
2083
      if has_ok:
2084
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2085
                                 (idx, msg))
2086

    
2087
      raise errors.OpExecError(str(msg))
2088

    
2089
  # default failure mode
2090
  raise errors.OpExecError(result)
2091

    
2092

    
2093
class JobPollCbBase:
2094
  """Base class for L{GenericPollJob} callbacks.
2095

2096
  """
2097
  def __init__(self):
2098
    """Initializes this class.
2099

2100
    """
2101

    
2102
  def WaitForJobChangeOnce(self, job_id, fields,
2103
                           prev_job_info, prev_log_serial):
2104
    """Waits for changes on a job.
2105

2106
    """
2107
    raise NotImplementedError()
2108

    
2109
  def QueryJobs(self, job_ids, fields):
2110
    """Returns the selected fields for the selected job IDs.
2111

2112
    @type job_ids: list of numbers
2113
    @param job_ids: Job IDs
2114
    @type fields: list of strings
2115
    @param fields: Fields
2116

2117
    """
2118
    raise NotImplementedError()
2119

    
2120

    
2121
class JobPollReportCbBase:
2122
  """Base class for L{GenericPollJob} reporting callbacks.
2123

2124
  """
2125
  def __init__(self):
2126
    """Initializes this class.
2127

2128
    """
2129

    
2130
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2131
    """Handles a log message.
2132

2133
    """
2134
    raise NotImplementedError()
2135

    
2136
  def ReportNotChanged(self, job_id, status):
2137
    """Called for if a job hasn't changed in a while.
2138

2139
    @type job_id: number
2140
    @param job_id: Job ID
2141
    @type status: string or None
2142
    @param status: Job status if available
2143

2144
    """
2145
    raise NotImplementedError()
2146

    
2147

    
2148
class _LuxiJobPollCb(JobPollCbBase):
2149
  def __init__(self, cl):
2150
    """Initializes this class.
2151

2152
    """
2153
    JobPollCbBase.__init__(self)
2154
    self.cl = cl
2155

    
2156
  def WaitForJobChangeOnce(self, job_id, fields,
2157
                           prev_job_info, prev_log_serial):
2158
    """Waits for changes on a job.
2159

2160
    """
2161
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2162
                                        prev_job_info, prev_log_serial)
2163

    
2164
  def QueryJobs(self, job_ids, fields):
2165
    """Returns the selected fields for the selected job IDs.
2166

2167
    """
2168
    return self.cl.QueryJobs(job_ids, fields)
2169

    
2170

    
2171
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2172
  def __init__(self, feedback_fn):
2173
    """Initializes this class.
2174

2175
    """
2176
    JobPollReportCbBase.__init__(self)
2177

    
2178
    self.feedback_fn = feedback_fn
2179

    
2180
    assert callable(feedback_fn)
2181

    
2182
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2183
    """Handles a log message.
2184

2185
    """
2186
    self.feedback_fn((timestamp, log_type, log_msg))
2187

    
2188
  def ReportNotChanged(self, job_id, status):
2189
    """Called if a job hasn't changed in a while.
2190

2191
    """
2192
    # Ignore
2193

    
2194

    
2195
class StdioJobPollReportCb(JobPollReportCbBase):
2196
  def __init__(self):
2197
    """Initializes this class.
2198

2199
    """
2200
    JobPollReportCbBase.__init__(self)
2201

    
2202
    self.notified_queued = False
2203
    self.notified_waitlock = False
2204

    
2205
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2206
    """Handles a log message.
2207

2208
    """
2209
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2210
             FormatLogMessage(log_type, log_msg))
2211

    
2212
  def ReportNotChanged(self, job_id, status):
2213
    """Called if a job hasn't changed in a while.
2214

2215
    """
2216
    if status is None:
2217
      return
2218

    
2219
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2220
      ToStderr("Job %s is waiting in queue", job_id)
2221
      self.notified_queued = True
2222

    
2223
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2224
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2225
      self.notified_waitlock = True
2226

    
2227

    
2228
def FormatLogMessage(log_type, log_msg):
2229
  """Formats a job message according to its type.
2230

2231
  """
2232
  if log_type != constants.ELOG_MESSAGE:
2233
    log_msg = str(log_msg)
2234

    
2235
  return utils.SafeEncode(log_msg)
2236

    
2237

    
2238
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2239
  """Function to poll for the result of a job.
2240

2241
  @type job_id: job identifier
2242
  @param job_id: the job to poll for results
2243
  @type cl: luxi.Client
2244
  @param cl: the luxi client to use for communicating with the master;
2245
             if None, a new client will be created
2246

2247
  """
2248
  if cl is None:
2249
    cl = GetClient()
2250

    
2251
  if reporter is None:
2252
    if feedback_fn:
2253
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2254
    else:
2255
      reporter = StdioJobPollReportCb()
2256
  elif feedback_fn:
2257
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2258

    
2259
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2260
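# Example (sketch, assuming "op" is an already-built opcode): the usual
# fire-and-poll sequence built from the helpers above is
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)   # one entry per opcode in the job
# which is essentially what SubmitOpCode() below does, with generic option
# handling added on top.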

    
2261

    
2262
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2263
  """Legacy function to submit an opcode.
2264

2265
  This is just a simple wrapper over the construction of the processor
2266
  instance. It should be extended to better handle feedback and
2267
  interaction functions.
2268

2269
  """
2270
  if cl is None:
2271
    cl = GetClient()
2272

    
2273
  SetGenericOpcodeOpts([op], opts)
2274

    
2275
  job_id = SendJob([op], cl=cl)
2276
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2277
    ToStdout("%d" % job_id)
2278

    
2279
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2280
                       reporter=reporter)
2281

    
2282
  return op_results[0]
2283

    
2284

    
2285
def SubmitOpCodeToDrainedQueue(op):
2286
  """Forcefully insert a job in the queue, even if it is drained.
2287

2288
  """
2289
  cl = GetClient()
2290
  job_id = cl.SubmitJobToDrainedQueue([op])
2291
  op_results = PollJob(job_id, cl=cl)
2292
  return op_results[0]
2293

    
2294

    
2295
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2296
  """Wrapper around SubmitOpCode or SendJob.
2297

2298
  This function will decide, based on the 'opts' parameter, whether to
2299
  submit and wait for the result of the opcode (and return it), or
2300
  whether to just send the job and print its identifier. It is used in
2301
  order to simplify the implementation of the '--submit' option.
2302

2303
  It will also process the opcodes if we're sending them via SendJob
2304
  (otherwise SubmitOpCode does it).
2305

2306
  """
2307
  if opts and opts.submit_only:
2308
    job = [op]
2309
    SetGenericOpcodeOpts(job, opts)
2310
    job_id = SendJob(job, cl=cl)
2311
    if opts.print_jobid:
2312
      ToStdout("%d" % job_id)
2313
    raise JobSubmittedException(job_id)
2314
  else:
2315
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2316
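# For illustration: command implementations typically just end with
#   SubmitOrSend(op, opts)
# letting the user pick the behaviour: with --submit the job is merely queued
# and its ID reported via JobSubmittedException, without it the call blocks
# until the job finishes and returns the opcode result.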

    
2317

    
2318
def _InitReasonTrail(op, opts):
2319
  """Builds the first part of the reason trail
2320

2321
  Builds the initial part of the reason trail, adding the user provided reason
2322
  (if it exists) and the name of the command starting the operation.
2323

2324
  @param op: the opcode the reason trail will be added to
2325
  @param opts: the command line options selected by the user
2326

2327
  """
2328
  assert len(sys.argv) >= 2
2329
  trail = []
2330

    
2331
  if opts.reason:
2332
    trail.append((constants.OPCODE_REASON_SRC_USER,
2333
                  opts.reason,
2334
                  utils.EpochNano()))
2335

    
2336
  binary = os.path.basename(sys.argv[0])
2337
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2338
  command = sys.argv[1]
2339
  trail.append((source, command, utils.EpochNano()))
2340
  op.reason = trail
2341
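# For illustration: a hypothetical "gnt-instance reboot --reason 'kernel
# upgrade' inst1" invocation would leave op.reason looking roughly like
#   [(constants.OPCODE_REASON_SRC_USER, "kernel upgrade", <EpochNano()>),
#    ("%s:gnt-instance" % constants.OPCODE_REASON_SRC_CLIENT, "reboot",
#     <EpochNano()>)]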

    
2342

    
2343
def SetGenericOpcodeOpts(opcode_list, options):
2344
  """Processor for generic options.
2345

2346
  This function updates the given opcodes based on generic command
2347
  line options (like debug, dry-run, etc.).
2348

2349
  @param opcode_list: list of opcodes
2350
  @param options: command line options or None
2351
  @return: None (in-place modification)
2352

2353
  """
2354
  if not options:
2355
    return
2356
  for op in opcode_list:
2357
    op.debug_level = options.debug
2358
    if hasattr(options, "dry_run"):
2359
      op.dry_run = options.dry_run
2360
    if getattr(options, "priority", None) is not None:
2361
      op.priority = options.priority
2362
    _InitReasonTrail(op, options)
2363

    
2364

    
2365
def FormatError(err):
2366
  """Return a formatted error message for a given error.
2367

2368
  This function takes an exception instance and returns a tuple
2369
  consisting of two values: first, the recommended exit code, and
2370
  second, a string describing the error message (not
2371
  newline-terminated).
2372

2373
  """
2374
  retcode = 1
2375
  obuf = StringIO()
2376
  msg = str(err)
2377
  if isinstance(err, errors.ConfigurationError):
2378
    txt = "Corrupt configuration file: %s" % msg
2379
    logging.error(txt)
2380
    obuf.write(txt + "\n")
2381
    obuf.write("Aborting.")
2382
    retcode = 2
2383
  elif isinstance(err, errors.HooksAbort):
2384
    obuf.write("Failure: hooks execution failed:\n")
2385
    for node, script, out in err.args[0]:
2386
      if out:
2387
        obuf.write("  node: %s, script: %s, output: %s\n" %
2388
                   (node, script, out))
2389
      else:
2390
        obuf.write("  node: %s, script: %s (no output)\n" %
2391
                   (node, script))
2392
  elif isinstance(err, errors.HooksFailure):
2393
    obuf.write("Failure: hooks general failure: %s" % msg)
2394
  elif isinstance(err, errors.ResolverError):
2395
    this_host = netutils.Hostname.GetSysName()
2396
    if err.args[0] == this_host:
2397
      msg = "Failure: can't resolve my own hostname ('%s')"
2398
    else:
2399
      msg = "Failure: can't resolve hostname '%s'"
2400
    obuf.write(msg % err.args[0])
2401
  elif isinstance(err, errors.OpPrereqError):
2402
    if len(err.args) == 2:
2403
      obuf.write("Failure: prerequisites not met for this"
2404
                 " operation:\nerror type: %s, error details:\n%s" %
2405
                 (err.args[1], err.args[0]))
2406
    else:
2407
      obuf.write("Failure: prerequisites not met for this"
2408
                 " operation:\n%s" % msg)
2409
  elif isinstance(err, errors.OpExecError):
2410
    obuf.write("Failure: command execution error:\n%s" % msg)
2411
  elif isinstance(err, errors.TagError):
2412
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2413
  elif isinstance(err, errors.JobQueueDrainError):
2414
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2415
               " accept new requests\n")
2416
  elif isinstance(err, errors.JobQueueFull):
2417
    obuf.write("Failure: the job queue is full and doesn't accept new"
2418
               " job submissions until old jobs are archived\n")
2419
  elif isinstance(err, errors.TypeEnforcementError):
2420
    obuf.write("Parameter Error: %s" % msg)
2421
  elif isinstance(err, errors.ParameterError):
2422
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2423
  elif isinstance(err, luxi.NoMasterError):
2424
    if err.args[0] == pathutils.MASTER_SOCKET:
2425
      daemon = "the master daemon"
2426
    elif err.args[0] == pathutils.QUERY_SOCKET:
2427
      daemon = "the config daemon"
2428
    else:
2429
      daemon = "socket '%s'" % str(err.args[0])
2430
    obuf.write("Cannot communicate with %s.\nIs the process running"
2431
               " and listening for connections?" % daemon)
2432
  elif isinstance(err, luxi.TimeoutError):
2433
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2434
               " been submitted and will continue to run even if the call"
2435
               " timed out. Useful commands in this situation are \"gnt-job"
2436
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2437
    obuf.write(msg)
2438
  elif isinstance(err, luxi.PermissionError):
2439
    obuf.write("It seems you don't have permissions to connect to the"
2440
               " master daemon.\nPlease retry as a different user.")
2441
  elif isinstance(err, luxi.ProtocolError):
2442
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2443
               "%s" % msg)
2444
  elif isinstance(err, errors.JobLost):
2445
    obuf.write("Error checking job status: %s" % msg)
2446
  elif isinstance(err, errors.QueryFilterParseError):
2447
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2448
    obuf.write("\n".join(err.GetDetails()))
2449
  elif isinstance(err, errors.GenericError):
2450
    obuf.write("Unhandled Ganeti error: %s" % msg)
2451
  elif isinstance(err, JobSubmittedException):
2452
    obuf.write("JobID: %s\n" % err.args[0])
2453
    retcode = 0
2454
  else:
2455
    obuf.write("Unhandled exception: %s" % msg)
2456
  return retcode, obuf.getvalue().rstrip("\n")
2457
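# For illustration: FormatError(errors.OpPrereqError("instance is running",
# errors.ECODE_INVAL)) yields (1, "Failure: prerequisites not met for this
# operation:\nerror type: ..."), while FormatError(JobSubmittedException("1234"))
# yields (0, "JobID: 1234").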

    
2458

    
2459
def GenericMain(commands, override=None, aliases=None,
2460
                env_override=frozenset()):
2461
  """Generic main function for all the gnt-* commands.
2462

2463
  @param commands: a dictionary with a special structure, see the design doc
2464
                   for command line handling.
2465
  @param override: if not None, we expect a dictionary with keys that will
2466
                   override command line options; this can be used to pass
2467
                   options from the scripts to generic functions
2468
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2469
  @param env_override: list of environment names which are allowed to submit
2470
                       default args for commands
2471

2472
  """
2473
  # save the program name and the entire command line for later logging
2474
  if sys.argv:
2475
    binary = os.path.basename(sys.argv[0])
2476
    if not binary:
2477
      binary = sys.argv[0]
2478

    
2479
    if len(sys.argv) >= 2:
2480
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2481
    else:
2482
      logname = binary
2483

    
2484
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2485
  else:
2486
    binary = "<unknown program>"
2487
    cmdline = "<unknown>"
2488

    
2489
  if aliases is None:
2490
    aliases = {}
2491

    
2492
  try:
2493
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2494
                                       env_override)
2495
  except _ShowVersion:
2496
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2497
             constants.RELEASE_VERSION)
2498
    return constants.EXIT_SUCCESS
2499
  except _ShowUsage, err:
2500
    for line in _FormatUsage(binary, commands):
2501
      ToStdout(line)
2502

    
2503
    if err.exit_error:
2504
      return constants.EXIT_FAILURE
2505
    else:
2506
      return constants.EXIT_SUCCESS
2507
  except errors.ParameterError, err:
2508
    result, err_msg = FormatError(err)
2509
    ToStderr(err_msg)
2510
    return 1
2511

    
2512
  if func is None: # parse error
2513
    return 1
2514

    
2515
  if override is not None:
2516
    for key, val in override.iteritems():
2517
      setattr(options, key, val)
2518

    
2519
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2520
                     stderr_logging=True)
2521

    
2522
  logging.info("Command line: %s", cmdline)
2523

    
2524
  try:
2525
    result = func(options, args)
2526
  except (errors.GenericError, luxi.ProtocolError,
2527
          JobSubmittedException), err:
2528
    result, err_msg = FormatError(err)
2529
    logging.exception("Error during command processing")
2530
    ToStderr(err_msg)
2531
  except KeyboardInterrupt:
2532
    result = constants.EXIT_FAILURE
2533
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2534
             " might have been submitted and"
2535
             " will continue to run in the background.")
2536
  except IOError, err:
2537
    if err.errno == errno.EPIPE:
2538
      # our terminal went away, we'll exit
2539
      sys.exit(constants.EXIT_FAILURE)
2540
    else:
2541
      raise
2542

    
2543
  return result
2544
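# Example (illustrative sketch of a gnt-* style client; the command below is
# made up):
#
#   def ShowVersion(opts, args):
#     ToStdout("%s", constants.RELEASE_VERSION)
#     return constants.EXIT_SUCCESS
#
#   commands = {
#     "version": (ShowVersion, ARGS_NONE, [], "", "Shows the release version"),
#     }
#
#   sys.exit(GenericMain(commands))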

    
2545

    
2546
def ParseNicOption(optvalue):
2547
  """Parses the value of the --net option(s).
2548

2549
  """
2550
  try:
2551
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2552
  except (TypeError, ValueError), err:
2553
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2554
                               errors.ECODE_INVAL)
2555

    
2556
  nics = [{}] * nic_max
2557
  for nidx, ndict in optvalue:
2558
    nidx = int(nidx)
2559

    
2560
    if not isinstance(ndict, dict):
2561
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2562
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2563

    
2564
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2565

    
2566
    nics[nidx] = ndict
2567

    
2568
  return nics
2569
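# For illustration: "--net 0:ip=192.0.2.10,mode=bridged --net 1:link=br1" is
# handed to this function as [("0", {"ip": "192.0.2.10", "mode": "bridged"}),
# ("1", {"link": "br1"})] and becomes a list with one dict per NIC index,
# empty dicts filling any index that was not mentioned.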

    
2570

    
2571
def GenericInstanceCreate(mode, opts, args):
2572
  """Add an instance to the cluster via either creation or import.
2573

2574
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2575
  @param opts: the command line options selected by the user
2576
  @type args: list
2577
  @param args: should contain only one element, the new instance name
2578
  @rtype: int
2579
  @return: the desired exit code
2580

2581
  """
2582
  instance = args[0]
2583

    
2584
  (pnode, snode) = SplitNodeOption(opts.node)
2585

    
2586
  hypervisor = None
2587
  hvparams = {}
2588
  if opts.hypervisor:
2589
    hypervisor, hvparams = opts.hypervisor
2590

    
2591
  if opts.nics:
2592
    nics = ParseNicOption(opts.nics)
2593
  elif opts.no_nics:
2594
    # no nics
2595
    nics = []
2596
  elif mode == constants.INSTANCE_CREATE:
2597
    # default of one nic, all auto
2598
    nics = [{}]
2599
  else:
2600
    # mode == import
2601
    nics = []
2602

    
2603
  if opts.disk_template == constants.DT_DISKLESS:
2604
    if opts.disks or opts.sd_size is not None:
2605
      raise errors.OpPrereqError("Diskless instance but disk"
2606
                                 " information passed", errors.ECODE_INVAL)
2607
    disks = []
2608
  else:
2609
    if (not opts.disks and not opts.sd_size
2610
        and mode == constants.INSTANCE_CREATE):
2611
      raise errors.OpPrereqError("No disk information specified",
2612
                                 errors.ECODE_INVAL)
2613
    if opts.disks and opts.sd_size is not None:
2614
      raise errors.OpPrereqError("Please use either the '--disk' or"
2615
                                 " '-s' option", errors.ECODE_INVAL)
2616
    if opts.sd_size is not None:
2617
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2618

    
2619
    if opts.disks:
2620
      try:
2621
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2622
      except ValueError, err:
2623
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2624
                                   errors.ECODE_INVAL)
2625
      disks = [{}] * disk_max
2626
    else:
2627
      disks = []
2628
    for didx, ddict in opts.disks:
2629
      didx = int(didx)
2630
      if not isinstance(ddict, dict):
2631
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2632
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2633
      elif constants.IDISK_SIZE in ddict:
2634
        if constants.IDISK_ADOPT in ddict:
2635
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2636
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2637
        try:
2638
          ddict[constants.IDISK_SIZE] = \
2639
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2640
        except ValueError, err:
2641
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2642
                                     (didx, err), errors.ECODE_INVAL)
2643
      elif constants.IDISK_ADOPT in ddict:
2644
        if constants.IDISK_SPINDLES in ddict:
2645
          raise errors.OpPrereqError("spindles is not a valid option when"
2646
                                     " adopting a disk", errors.ECODE_INVAL)
2647
        if mode == constants.INSTANCE_IMPORT:
2648
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2649
                                     " import", errors.ECODE_INVAL)
2650
        ddict[constants.IDISK_SIZE] = 0
2651
      else:
2652
        raise errors.OpPrereqError("Missing size or adoption source for"
2653
                                   " disk %d" % didx, errors.ECODE_INVAL)
2654
      if constants.IDISK_SPINDLES in ddict:
2655
        ddict[constants.IDISK_SPINDLES] = \
2656
          utils.ParseUnit(ddict[constants.IDISK_SPINDLES])
2657

    
2658
      disks[didx] = ddict
2659

    
2660
  if opts.tags is not None:
2661
    tags = opts.tags.split(",")
2662
  else:
2663
    tags = []
2664

    
2665
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2666
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2667

    
2668
  if mode == constants.INSTANCE_CREATE:
2669
    start = opts.start
2670
    os_type = opts.os
2671
    force_variant = opts.force_variant
2672
    src_node = None
2673
    src_path = None
2674
    no_install = opts.no_install
2675
    identify_defaults = False
2676
    compress = constants.IEC_NONE
2677
  elif mode == constants.INSTANCE_IMPORT:
2678
    start = False
2679
    os_type = None
2680
    force_variant = False
2681
    src_node = opts.src_node
2682
    src_path = opts.src_dir
2683
    no_install = None
2684
    identify_defaults = opts.identify_defaults
2685
    compress = opts.compress
2686
  else:
2687
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2688

    
2689
  op = opcodes.OpInstanceCreate(instance_name=instance,
2690
                                disks=disks,
2691
                                disk_template=opts.disk_template,
2692
                                nics=nics,
2693
                                conflicts_check=opts.conflicts_check,
2694
                                pnode=pnode, snode=snode,
2695
                                ip_check=opts.ip_check,
2696
                                name_check=opts.name_check,
2697
                                wait_for_sync=opts.wait_for_sync,
2698
                                file_storage_dir=opts.file_storage_dir,
2699
                                file_driver=opts.file_driver,
2700
                                iallocator=opts.iallocator,
2701
                                hypervisor=hypervisor,
2702
                                hvparams=hvparams,
2703
                                beparams=opts.beparams,
2704
                                osparams=opts.osparams,
2705
                                mode=mode,
2706
                                start=start,
2707
                                os_type=os_type,
2708
                                force_variant=force_variant,
2709
                                src_node=src_node,
2710
                                src_path=src_path,
2711
                                compress=compress,
2712
                                tags=tags,
2713
                                no_install=no_install,
2714
                                identify_defaults=identify_defaults,
2715
                                ignore_ipolicy=opts.ignore_ipolicy)
2716

    
2717
  SubmitOrSend(op, opts)
2718
  return 0
2719
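# For illustration: "--disk 0:size=10G --disk 1:size=2G,spindles=1" reaches
# this function as opts.disks = [("0", {"size": "10G"}), ("1", {"size": "2G",
# "spindles": "1"})] and is normalised above to
# disks = [{"size": 10240}, {"size": 2048, "spindles": 1}] (sizes in MiB, via
# utils.ParseUnit).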

    
2720

    
2721
class _RunWhileClusterStoppedHelper:
2722
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2723

2724
  """
2725
  def __init__(self, feedback_fn, cluster_name, master_node,
2726
               online_nodes, ssh_ports):
2727
    """Initializes this class.
2728

2729
    @type feedback_fn: callable
2730
    @param feedback_fn: Feedback function
2731
    @type cluster_name: string
2732
    @param cluster_name: Cluster name
2733
    @type master_node: string
2734
    @param master_node: Master node name
2735
    @type online_nodes: list
2736
    @param online_nodes: List of names of online nodes
2737
    @type ssh_ports: list
2738
    @param ssh_ports: List of SSH ports of online nodes
2739

2740
    """
2741
    self.feedback_fn = feedback_fn
2742
    self.cluster_name = cluster_name
2743
    self.master_node = master_node
2744
    self.online_nodes = online_nodes
2745
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))
2746

    
2747
    self.ssh = ssh.SshRunner(self.cluster_name)
2748

    
2749
    self.nonmaster_nodes = [name for name in online_nodes
2750
                            if name != master_node]
2751

    
2752
    assert self.master_node not in self.nonmaster_nodes
2753

    
2754
  def _RunCmd(self, node_name, cmd):
2755
    """Runs a command on the local or a remote machine.
2756

2757
    @type node_name: string
2758
    @param node_name: Machine name
2759
    @type cmd: list
2760
    @param cmd: Command
2761

2762
    """
2763
    if node_name is None or node_name == self.master_node:
2764
      # No need to use SSH
2765
      result = utils.RunCmd(cmd)
2766
    else:
2767
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2768
                            utils.ShellQuoteArgs(cmd),
2769
                            port=self.ssh_ports[node_name])
2770

    
2771
    if result.failed:
2772
      errmsg = ["Failed to run command %s" % result.cmd]
2773
      if node_name:
2774
        errmsg.append("on node %s" % node_name)
2775
      errmsg.append(": exitcode %s and error %s" %
2776
                    (result.exit_code, result.output))
2777
      raise errors.OpExecError(" ".join(errmsg))
2778

    
2779
  def Call(self, fn, *args):
2780
    """Call function while all daemons are stopped.
2781

2782
    @type fn: callable
2783
    @param fn: Function to be called
2784

2785
    """
2786
    # Pause watcher by acquiring an exclusive lock on watcher state file
2787
    self.feedback_fn("Blocking watcher")
2788
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2789
    try:
2790
      # TODO: Currently, this just blocks. There's no timeout.
2791
      # TODO: Should it be a shared lock?
2792
      watcher_block.Exclusive(blocking=True)
2793

    
2794
      # Stop master daemons, so that no new jobs can come in and all running
2795
      # ones are finished
2796
      self.feedback_fn("Stopping master daemons")
2797
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2798
      try:
2799
        # Stop daemons on all nodes
2800
        for node_name in self.online_nodes:
2801
          self.feedback_fn("Stopping daemons on %s" % node_name)
2802
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2803

    
2804
        # All daemons are shut down now
2805
        try:
2806
          return fn(self, *args)
2807
        except Exception, err:
2808
          _, errmsg = FormatError(err)
2809
          logging.exception("Caught exception")
2810
          self.feedback_fn(errmsg)
2811
          raise
2812
      finally:
2813
        # Start cluster again, master node last
2814
        for node_name in self.nonmaster_nodes + [self.master_node]:
2815
          self.feedback_fn("Starting daemons on %s" % node_name)
2816
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2817
    finally:
2818
      # Resume watcher
2819
      watcher_block.Close()
2820

    
2821

    
2822
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()
  # Query client
  qcl = GetClient(query=True)

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=qcl)
  ssh_ports = GetNodesSshPorts(online_nodes, qcl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl
  del qcl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes, ssh_ports).Call(fn, *args)


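# Example (editor's sketch, not part of the original module): how a caller
# might use RunWhileClusterStopped. Note that the callback receives the
# helper instance as its first argument (see Call above). The callback name
# and marker value below are hypothetical.
def _ExampleRunWhileClusterStoppedUsage():
  """Sketch of a RunWhileClusterStopped invocation (never called)."""
  def _WhileStopped(helper, marker):
    # Runs with all daemons stopped; "helper" is the
    # _RunWhileClusterStoppedHelper instance, "marker" comes from *args
    helper.feedback_fn("Cluster is stopped, marker is %s" % marker)

  RunWhileClusterStopped(ToStdout, _WhileStopped, "example-marker")

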
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


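# Example (editor's sketch, not part of the original module): GenerateTable
# in the "smart" separator=None mode right-aligns numeric fields and formats
# unit fields via utils.FormatUnit. The field names and sample data below
# are made up for illustration.
def _ExampleGenerateTableUsage():
  """Sketch of a GenerateTable call (never called)."""
  headers = {"name": "Node", "dfree": "DFree"}
  fields = ["name", "dfree"]
  data = [["node1.example.com", 102400], ["node2.example.com", 2048]]
  # "dfree" is both numeric (right-aligned) and unit-formatted
  # (human-readable, since separator is None)
  return GenerateTable(headers, fields, None, data,
                       numfields=["dfree"], unitfields=["dfree"])

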
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


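# Example (editor's sketch, not part of the original module): how a "list"
# command might call GenericList. The option attributes (opts.units,
# opts.separator, opts.no_headers, opts.verbose) mirror the common cli
# options, but the surrounding command is hypothetical.
def _ExampleGenericListUsage(opts, args):
  """Sketch of a GenericList call for listing nodes (never called)."""
  return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"], args,
                     opts.units, opts.separator, not opts.no_headers,
                     verbose=opts.verbose, namefield="name")

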
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


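# Example (editor's sketch, not part of the original module): building
# TableColumn descriptions and formatting rows with FormatTable. The column
# titles and row values are made up for illustration.
def _ExampleFormatTableUsage():
  """Sketch of a FormatTable call (never called)."""
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Size", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["disk/0", 1024], ["disk/1", 512]]
  # With separator=None the column widths are computed automatically;
  # passing e.g. "|" instead would just join the formatted cells.
  return FormatTable(rows, columns, True, None)

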
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


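# Example (editor's sketch, not part of the original module): FormatTimestamp
# expects a (seconds, microseconds) pair and falls back to "?" for anything
# else; the sample value below is arbitrary.
def _ExampleFormatTimestampUsage():
  """Sketch of FormatTimestamp behaviour (never called)."""
  assert FormatTimestamp("not-a-timestamp") == "?"
  return FormatTimestamp((1367936292, 123456))  # delegates to utils.FormatTime

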
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


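# Example (editor's sketch, not part of the original module): the suffix_map
# above translates directly into the results below.
def _ExampleParseTimespecUsage():
  """Sketch of ParseTimespec results (never called)."""
  assert ParseTimespec("30") == 30        # bare value: seconds
  assert ParseTimespec("2h") == 7200      # 2 * 3600
  assert ParseTimespec("1w") == 604800    # one week in seconds

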
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient(query=True)

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


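# Example (editor's sketch, not part of the original module): collecting the
# secondary IPs of all online nodes except the master, e.g. for traffic over
# the replication network.
def _ExampleGetOnlineNodesUsage():
  """Sketch of a GetOnlineNodes call (never called)."""
  return GetOnlineNodes([], secondary_ips=True, filter_master=True)

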
def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{Client}
  @return: the list of SSH ports corresponding to the nodes
  @rtype: a list of integers
  """
  return map(lambda t: t[0],
             cl.QueryNodes(names=nodes,
                           fields=["ndp/ssh_port"],
                           use_locking=False))


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


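# Example (editor's sketch, not part of the original module): messages are
# %-formatted only when extra arguments are given, so literal "%" characters
# are safe in the no-argument form.
def _ExampleToStreamUsage():
  """Sketch of ToStdout/ToStderr calls (never called)."""
  ToStdout("Job %s has finished", "1234")
  ToStderr("Warning: 100% of the quota is used")

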
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


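# Example (editor's sketch, not part of the original module): queueing one
# job per instance and waiting for all of them. OpInstanceStartup is an
# existing opcode, but the surrounding command (opts, instance names) is
# hypothetical.
def _ExampleJobExecutorUsage(opts, instance_names):
  """Sketch of a JobExecutor run (never called)."""
  jex = JobExecutor(opts=opts)
  for name in instance_names:
    jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
  # Returns a list of (success, result-or-error) pairs, one per queued job
  return jex.GetResults()

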
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


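# Example (editor's sketch, not part of the original module): parameters
# missing from the "own" dictionary are rendered as "default (...)" using the
# effective value. The parameter names below are made up for illustration.
def _ExampleFormatParamsDictInfoUsage():
  """Sketch of FormatParamsDictInfo output (never called)."""
  custom = {"kernel_path": "/boot/vmlinuz-custom"}
  effective = {"kernel_path": "/boot/vmlinuz-custom", "root_path": "/dev/vda1"}
  assert FormatParamsDictInfo(custom, effective) == {
    "kernel_path": "/boot/vmlinuz-custom",
    "root_path": "default (/dev/vda1)",
    }

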
def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
<