#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
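
# Illustrative note (added for clarity, not part of the upstream module): a
# command's positional arguments are described by a list of _Argument
# instances, where "min"/"max" bound how many values the argument accepts
# (max=None meaning unlimited). A hypothetical command taking exactly one
# instance name followed by any number of node names could therefore declare:
#
#   [ArgInstance(min=1, max=1), ArgNode()]
#
# The ARGS_* constants above simply pre-build the most common of these lists.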


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
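
# Illustrative example (added, not upstream): options declared with
# type="unit" (e.g. OS_SIZE_OPT below) pass their raw value through
# utils.ParseUnit before it is stored. Assuming the usual Ganeti convention
# of mebibyte-based sizes, a command line such as
#
#   gnt-instance add -s 4g ...
#
# would hand the opcode the integer 4096 rather than the string "4g".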


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
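
# Illustrative example (added, not upstream): with prefix parsing enabled, a
# value such as "mem=512,no_auto_balance,-vcpus" given to an option "-B"
# parses as
#
#   _SplitKeyVal("-B", "mem=512,no_auto_balance,-vcpus", True)
#   => {"mem": "512", "auto_balance": False, "vcpus": None}
#
# i.e. explicit values are kept as strings, "no_"-prefixed keys become False,
# "-"-prefixed keys become None, and a bare key without prefix becomes True.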


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
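
# Illustrative example (added, not upstream): the identifier before the first
# ":" is split off and the remainder goes through _SplitKeyVal, so
#
#   _SplitIdentKeyVal("-H", "kvm:kernel_path=/vmlinuz,acpi", True)
#   => ("kvm", {"kernel_path": "/vmlinuz", "acpi": True})
#
# while a bare "no_<ident>" (or "-<ident>" not followed by a digit) yields
# (<ident>, False) respectively (<ident>, None), which is how removing a whole
# parameter group is expressed on the command line.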


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
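
# Illustrative example (added, not upstream): the "multilistidentkeyval" type
# nests two separators, "//" between lists and "/" between their entries:
#
#   check_multilist_ident_key_val(None, "--opt",
#                                 "min:memory-size=128/max:cpu-count=4"
#                                 "//min:disk-size=256")
#   => [{"min": {"memory-size": "128"}, "max": {"cpu-count": "4"}},
#       {"min": {"disk-size": "256"}}]
#
# This is the shape consumed by --ipolicy-bounds-specs further below.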


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
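
# Illustrative examples (added, not upstream) for the three converters above:
#
#   check_bool(None, "--foo", "Yes")           => True
#   check_list(None, "--foo", "a,b,c")         => ["a", "b", "c"]
#   check_list(None, "--foo", "")              => []
#   check_maybefloat(None, "--foo", "2.5")     => 2.5
#   check_maybefloat(None, "--foo", "default") => "default"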


# completion_suggest is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
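
# Illustrative example (added, not upstream): an option definition combining a
# custom type with a completion hint; the TYPE_CHECKER table above makes
# optparse run the raw string through check_key_val before storing it. The
# name "EXAMPLE_OPT" is hypothetical, the real definitions follow below.
#
#   EXAMPLE_OPT = cli_option("--example", dest="example", type="keyval",
#                            default={}, help="key=value pairs",
#                            completion_suggest=OPT_COMPL_ONE_NODE)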


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                         " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))
1405

    
1406
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1407
                                       dest="cluster_domain_secret",
1408
                                       default=None,
1409
                                       help=("Load new new cluster domain"
1410
                                             " secret from file"))
1411

    
1412
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1413
                                           dest="new_cluster_domain_secret",
1414
                                           default=False, action="store_true",
1415
                                           help=("Create a new cluster domain"
1416
                                                 " secret"))
1417

    
1418
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1419
                              dest="use_replication_network",
1420
                              help="Whether to use the replication network"
1421
                              " for talking to the nodes",
1422
                              action="store_true", default=False)
1423

    
1424
MAINTAIN_NODE_HEALTH_OPT = \
1425
    cli_option("--maintain-node-health", dest="maintain_node_health",
1426
               metavar=_YORNO, default=None, type="bool",
1427
               help="Configure the cluster to automatically maintain node"
1428
               " health, by shutting down unknown instances, shutting down"
1429
               " unknown DRBD devices, etc.")
1430

    
1431
IDENTIFY_DEFAULTS_OPT = \
1432
    cli_option("--identify-defaults", dest="identify_defaults",
1433
               default=False, action="store_true",
1434
               help="Identify which saved instance parameters are equal to"
1435
               " the current cluster defaults and set them as such, instead"
1436
               " of marking them as overridden")
1437

    
1438
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1439
                         action="store", dest="uid_pool",
1440
                         help=("A list of user-ids or user-id"
1441
                               " ranges separated by commas"))
1442

    
1443
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1444
                          action="store", dest="add_uids",
1445
                          help=("A list of user-ids or user-id"
1446
                                " ranges separated by commas, to be"
1447
                                " added to the user-id pool"))
1448

    
1449
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1450
                             action="store", dest="remove_uids",
1451
                             help=("A list of user-ids or user-id"
1452
                                   " ranges separated by commas, to be"
1453
                                   " removed from the user-id pool"))
1454

    
1455
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1456
                              action="store", dest="reserved_lvs",
1457
                              help=("A comma-separated list of reserved"
1458
                                    " logical volumes names, that will be"
1459
                                    " ignored by cluster verify"))
1460

    
1461
ROMAN_OPT = cli_option("--roman",
1462
                       dest="roman_integers", default=False,
1463
                       action="store_true",
1464
                       help="Use roman numbers for positive integers")
1465

    
1466
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1467
                             action="store", default=None,
1468
                             help="Specifies usermode helper for DRBD")
1469

    
1470
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1471
                                action="store_false", default=True,
1472
                                help="Disable support for DRBD")
1473

    
1474
PRIMARY_IP_VERSION_OPT = \
1475
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1476
               action="store", dest="primary_ip_version",
1477
               metavar="%d|%d" % (constants.IP4_VERSION,
1478
                                  constants.IP6_VERSION),
1479
               help="Cluster-wide IP version for primary IP")
1480

    
1481
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1482
                              action="store_true",
1483
                              help="Show machine name for every line in output")
1484

    
1485
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1486
                              action="store_true",
1487
                              help=("Hide successful results and show failures"
1488
                                    " only (determined by the exit code)"))
1489

    
1490
REASON_OPT = cli_option("--reason", default=None,
1491
                        help="The reason for executing the command")
1492

    
1493

    
1494
def _PriorityOptionCb(option, _, value, parser):
1495
  """Callback for processing C{--priority} option.
1496

1497
  """
1498
  value = _PRIONAME_TO_VALUE[value]
1499

    
1500
  setattr(parser.values, option.dest, value)
1501

    
1502

    
1503
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1504
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1505
                          choices=_PRIONAME_TO_VALUE.keys(),
1506
                          action="callback", type="choice",
1507
                          callback=_PriorityOptionCb,
1508
                          help="Priority for opcode processing")
1509

    
1510
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1511
                        type="bool", default=None, metavar=_YORNO,
1512
                        help="Sets the hidden flag on the OS")
1513

    
1514
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1515
                        type="bool", default=None, metavar=_YORNO,
1516
                        help="Sets the blacklisted flag on the OS")
1517

    
1518
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1519
                                     type="bool", metavar=_YORNO,
1520
                                     dest="prealloc_wipe_disks",
1521
                                     help=("Wipe disks prior to instance"
1522
                                           " creation"))
1523

    
1524
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1525
                             type="keyval", default=None,
1526
                             help="Node parameters")
1527

    
1528
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1529
                              action="store", metavar="POLICY", default=None,
1530
                              help="Allocation policy for the node group")
1531

    
1532
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1533
                              type="bool", metavar=_YORNO,
1534
                              dest="node_powered",
1535
                              help="Specify if the SoR for node is powered")
1536

    
1537
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1538
                             default=constants.OOB_TIMEOUT,
1539
                             help="Maximum time to wait for out-of-band helper")
1540

    
1541
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1542
                             default=constants.OOB_POWER_DELAY,
1543
                             help="Time in seconds to wait between power-ons")
1544

    
1545
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1546
                              action="store_true", default=False,
1547
                              help=("Whether command argument should be treated"
1548
                                    " as filter"))
1549

    
1550
NO_REMEMBER_OPT = cli_option("--no-remember",
1551
                             dest="no_remember",
1552
                             action="store_true", default=False,
1553
                             help="Perform but do not record the change"
1554
                             " in the configuration")
1555

    
1556
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1557
                              default=False, action="store_true",
1558
                              help="Evacuate primary instances only")
1559

    
1560
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1561
                                default=False, action="store_true",
1562
                                help="Evacuate secondary instances only"
1563
                                     " (applies only to internally mirrored"
1564
                                     " disk templates, e.g. %s)" %
1565
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1566

    
1567
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1568
                                action="store_true", default=False,
1569
                                help="Pause instance at startup")
1570

    
1571
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1572
                          help="Destination node group (name or uuid)",
1573
                          default=None, action="append",
1574
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1575

    
1576
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1577
                               action="append", dest="ignore_errors",
1578
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1579
                               help="Error code to be ignored")
1580

    
1581
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1582
                            action="append",
1583
                            help=("Specify disk state information in the"
1584
                                  " format"
1585
                                  " storage_type/identifier:option=value,...;"
1586
                                  " note this is unused for now"),
1587
                            type="identkeyval")
1588

    
1589
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1590
                          action="append",
1591
                          help=("Specify hypervisor state information in the"
1592
                                " format hypervisor:option=value,...;"
1593
                                " note this is unused for now"),
1594
                          type="identkeyval")
1595

    
1596
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1597
                                action="store_true", default=False,
1598
                                help="Ignore instance policy violations")
1599

    
1600
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1601
                             help="Sets the instance's runtime memory,"
1602
                             " ballooning it up or down to the new value",
1603
                             default=None, type="unit", metavar="<size>")
1604

    
1605
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1606
                          action="store_true", default=False,
1607
                          help="Marks the grow as absolute instead of the"
1608
                          " (default) relative mode")
1609

    
1610
NETWORK_OPT = cli_option("--network",
1611
                         action="store", default=None, dest="network",
1612
                         help="IP network in CIDR notation")
1613

    
1614
GATEWAY_OPT = cli_option("--gateway",
1615
                         action="store", default=None, dest="gateway",
1616
                         help="IP address of the router (gateway)")
1617

    
1618
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1619
                                  action="store", default=None,
1620
                                  dest="add_reserved_ips",
1621
                                  help="Comma-separated list of"
1622
                                  " reserved IPs to add")
1623

    
1624
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1625
                                     action="store", default=None,
1626
                                     dest="remove_reserved_ips",
1627
                                     help="Comma-delimited list of"
1628
                                     " reserved IPs to remove")
1629

    
1630
NETWORK6_OPT = cli_option("--network6",
1631
                          action="store", default=None, dest="network6",
1632
                          help="IP network in CIDR notation")
1633

    
1634
GATEWAY6_OPT = cli_option("--gateway6",
1635
                          action="store", default=None, dest="gateway6",
1636
                          help="IP6 address of the router (gateway)")
1637

    
1638
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1639
                                  dest="conflicts_check",
1640
                                  default=True,
1641
                                  action="store_false",
1642
                                  help="Don't check for conflicting IPs")
1643

    
1644
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1645
                                 default=False, action="store_true",
1646
                                 help="Include default values")
1647

    
1648
#: Options provided by all commands
1649
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1650

    
1651
# options related to asynchronous job handling
1652

    
1653
SUBMIT_OPTS = [
1654
  SUBMIT_OPT,
1655
  PRINT_JOBID_OPT,
1656
  ]
1657

    
1658
# common options for creating instances. add and import then add their own
1659
# specific ones.
1660
COMMON_CREATE_OPTS = [
1661
  BACKEND_OPT,
1662
  DISK_OPT,
1663
  DISK_TEMPLATE_OPT,
1664
  FILESTORE_DIR_OPT,
1665
  FILESTORE_DRIVER_OPT,
1666
  HYPERVISOR_OPT,
1667
  IALLOCATOR_OPT,
1668
  NET_OPT,
1669
  NODE_PLACEMENT_OPT,
1670
  NOIPCHECK_OPT,
1671
  NOCONFLICTSCHECK_OPT,
1672
  NONAMECHECK_OPT,
1673
  NONICS_OPT,
1674
  NWSYNC_OPT,
1675
  OSPARAMS_OPT,
1676
  OS_SIZE_OPT,
1677
  SUBMIT_OPT,
1678
  PRINT_JOBID_OPT,
1679
  TAG_ADD_OPT,
1680
  DRY_RUN_OPT,
1681
  PRIORITY_OPT,
1682
  ]
1683

    
1684
# common instance policy options
1685
INSTANCE_POLICY_OPTS = [
1686
  IPOLICY_BOUNDS_SPECS_OPT,
1687
  IPOLICY_DISK_TEMPLATES,
1688
  IPOLICY_VCPU_RATIO,
1689
  IPOLICY_SPINDLE_RATIO,
1690
  ]
1691

    
1692
# instance policy split specs options
1693
SPLIT_ISPECS_OPTS = [
1694
  SPECS_CPU_COUNT_OPT,
1695
  SPECS_DISK_COUNT_OPT,
1696
  SPECS_DISK_SIZE_OPT,
1697
  SPECS_MEM_SIZE_OPT,
1698
  SPECS_NIC_COUNT_OPT,
1699
  ]
1700

    
1701

    
1702
class _ShowUsage(Exception):
1703
  """Exception class for L{_ParseArgs}.
1704

1705
  """
1706
  def __init__(self, exit_error):
1707
    """Initializes instances of this class.
1708

1709
    @type exit_error: bool
1710
    @param exit_error: Whether to report failure on exit
1711

1712
    """
1713
    Exception.__init__(self)
1714
    self.exit_error = exit_error
1715

    
1716

    
1717
class _ShowVersion(Exception):
1718
  """Exception class for L{_ParseArgs}.
1719

1720
  """
1721

    
1722

    
1723
def _ParseArgs(binary, argv, commands, aliases, env_override):
1724
  """Parser for the command line arguments.
1725

1726
  This function parses the arguments and returns the function which
1727
  must be executed together with its (modified) arguments.
1728

1729
  @param binary: Script name
1730
  @param argv: Command line arguments
1731
  @param commands: Dictionary containing command definitions
1732
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1733
  @param env_override: list of env variables allowed for default args
1734
  @raise _ShowUsage: If usage description should be shown
1735
  @raise _ShowVersion: If version should be shown
1736

1737
  """
1738
  assert not (env_override - set(commands))
1739
  assert not (set(aliases.keys()) & set(commands.keys()))
1740

    
1741
  if len(argv) > 1:
1742
    cmd = argv[1]
1743
  else:
1744
    # No option or command given
1745
    raise _ShowUsage(exit_error=True)
1746

    
1747
  if cmd == "--version":
1748
    raise _ShowVersion()
1749
  elif cmd == "--help":
1750
    raise _ShowUsage(exit_error=False)
1751
  elif not (cmd in commands or cmd in aliases):
1752
    raise _ShowUsage(exit_error=True)
1753

    
1754
  # get command, unalias it, and look it up in commands
1755
  if cmd in aliases:
1756
    if aliases[cmd] not in commands:
1757
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1758
                                   " command '%s'" % (cmd, aliases[cmd]))
1759

    
1760
    cmd = aliases[cmd]
1761

    
1762
  if cmd in env_override:
1763
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1764
    env_args = os.environ.get(args_env_name)
1765
    if env_args:
1766
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1767

    
1768
  func, args_def, parser_opts, usage, description = commands[cmd]
1769
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1770
                        description=description,
1771
                        formatter=TitledHelpFormatter(),
1772
                        usage="%%prog %s %s" % (cmd, usage))
1773
  parser.disable_interspersed_args()
1774
  options, args = parser.parse_args(args=argv[2:])
1775

    
1776
  if not _CheckArguments(cmd, args_def, args):
1777
    return None, None, None
1778

    
1779
  return func, options, args
1780

    
1781

    
1782
def _FormatUsage(binary, commands):
1783
  """Generates a nice description of all commands.
1784

1785
  @param binary: Script name
1786
  @param commands: Dictionary containing command definitions
1787

1788
  """
1789
  # compute the max line length for cmd + usage
1790
  mlen = min(60, max(map(len, commands)))
1791

    
1792
  yield "Usage: %s {command} [options...] [argument...]" % binary
1793
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1794
  yield ""
1795
  yield "Commands:"
1796

    
1797
  # and format a nice command list
1798
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1799
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1800
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1801
    for line in help_lines:
1802
      yield " %-*s   %s" % (mlen, "", line)
1803

    
1804
  yield ""
1805

    
1806

    
1807
def _CheckArguments(cmd, args_def, args):
1808
  """Verifies the arguments using the argument definition.
1809

1810
  Algorithm:
1811

1812
    1. Abort with error if values specified by user but none expected.
1813

1814
    1. For each argument in definition
1815

1816
      1. Keep running count of minimum number of values (min_count)
1817
      1. Keep running count of maximum number of values (max_count)
1818
      1. If it has an unlimited number of values
1819

1820
        1. Abort with error if it's not the last argument in the definition
1821

1822
    1. If last argument has limited number of values
1823

1824
      1. Abort with error if number of values doesn't match or is too large
1825

1826
    1. Abort with error if user didn't pass enough values (min_count)
1827

1828
  """
1829
  if args and not args_def:
1830
    ToStderr("Error: Command %s expects no arguments", cmd)
1831
    return False
1832

    
1833
  min_count = None
1834
  max_count = None
1835
  check_max = None
1836

    
1837
  last_idx = len(args_def) - 1
1838

    
1839
  for idx, arg in enumerate(args_def):
1840
    if min_count is None:
1841
      min_count = arg.min
1842
    elif arg.min is not None:
1843
      min_count += arg.min
1844

    
1845
    if max_count is None:
1846
      max_count = arg.max
1847
    elif arg.max is not None:
1848
      max_count += arg.max
1849

    
1850
    if idx == last_idx:
1851
      check_max = (arg.max is not None)
1852

    
1853
    elif arg.max is None:
1854
      raise errors.ProgrammerError("Only the last argument can have max=None")
1855

    
1856
  if check_max:
1857
    # Command with exact number of arguments
1858
    if (min_count is not None and max_count is not None and
1859
        min_count == max_count and len(args) != min_count):
1860
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1861
      return False
1862

    
1863
    # Command with limited number of arguments
1864
    if max_count is not None and len(args) > max_count:
1865
      ToStderr("Error: Command %s expects only %d argument(s)",
1866
               cmd, max_count)
1867
      return False
1868

    
1869
  # Command with some required arguments
1870
  if min_count is not None and len(args) < min_count:
1871
    ToStderr("Error: Command %s expects at least %d argument(s)",
1872
             cmd, min_count)
1873
    return False
1874

    
1875
  return True
1876

    
1877

    
1878
def SplitNodeOption(value):
1879
  """Splits the value of a --node option.
1880

1881
  """
1882
  if value and ":" in value:
1883
    return value.split(":", 1)
1884
  else:
1885
    return (value, None)
1886

    
1887

    
1888
def CalculateOSNames(os_name, os_variants):
1889
  """Calculates all the names an OS can be called, according to its variants.
1890

1891
  @type os_name: string
1892
  @param os_name: base name of the os
1893
  @type os_variants: list or None
1894
  @param os_variants: list of supported variants
1895
  @rtype: list
1896
  @return: list of valid names
1897

1898
  """
1899
  if os_variants:
1900
    return ["%s+%s" % (os_name, v) for v in os_variants]
1901
  else:
1902
    return [os_name]
1903

    
1904

    
1905
def ParseFields(selected, default):
1906
  """Parses the values of "--field"-like options.
1907

1908
  @type selected: string or None
1909
  @param selected: User-selected options
1910
  @type default: list
1911
  @param default: Default fields
1912

1913
  """
1914
  if selected is None:
1915
    return default
1916

    
1917
  if selected.startswith("+"):
1918
    return default + selected[1:].split(",")
1919

    
1920
  return selected.split(",")
1921

    
1922

    
1923
UsesRPC = rpc.RunWithRPC
1924

    
1925

    
1926
def AskUser(text, choices=None):
1927
  """Ask the user a question.
1928

1929
  @param text: the question to ask
1930

1931
  @param choices: list with elements tuples (input_char, return_value,
1932
      description); if not given, it will default to: [('y', True,
1933
      'Perform the operation'), ('n', False, 'Do no do the operation')];
1934
      note that the '?' char is reserved for help
1935

1936
  @return: one of the return values from the choices list; if input is
1937
      not possible (i.e. not running with a tty, we return the last
1938
      entry from the list
1939

1940
  """
1941
  if choices is None:
1942
    choices = [("y", True, "Perform the operation"),
1943
               ("n", False, "Do not perform the operation")]
1944
  if not choices or not isinstance(choices, list):
1945
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1946
  for entry in choices:
1947
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1948
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1949

    
1950
  answer = choices[-1][1]
1951
  new_text = []
1952
  for line in text.splitlines():
1953
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1954
  text = "\n".join(new_text)
1955
  try:
1956
    f = file("/dev/tty", "a+")
1957
  except IOError:
1958
    return answer
1959
  try:
1960
    chars = [entry[0] for entry in choices]
1961
    chars[-1] = "[%s]" % chars[-1]
1962
    chars.append("?")
1963
    maps = dict([(entry[0], entry[1]) for entry in choices])
1964
    while True:
1965
      f.write(text)
1966
      f.write("\n")
1967
      f.write("/".join(chars))
1968
      f.write(": ")
1969
      line = f.readline(2).strip().lower()
1970
      if line in maps:
1971
        answer = maps[line]
1972
        break
1973
      elif line == "?":
1974
        for entry in choices:
1975
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1976
        f.write("\n")
1977
        continue
1978
  finally:
1979
    f.close()
1980
  return answer
1981

    
1982

    
1983
class JobSubmittedException(Exception):
1984
  """Job was submitted, client should exit.
1985

1986
  This exception has one argument, the ID of the job that was
1987
  submitted. The handler should print this ID.
1988

1989
  This is not an error, just a structured way to exit from clients.
1990

1991
  """
1992

    
1993

    
1994
def SendJob(ops, cl=None):
1995
  """Function to submit an opcode without waiting for the results.
1996

1997
  @type ops: list
1998
  @param ops: list of opcodes
1999
  @type cl: luxi.Client
2000
  @param cl: the luxi client to use for communicating with the master;
2001
             if None, a new client will be created
2002

2003
  """
2004
  if cl is None:
2005
    cl = GetClient()
2006

    
2007
  job_id = cl.SubmitJob(ops)
2008

    
2009
  return job_id
2010

    
2011

    
2012
def GenericPollJob(job_id, cbs, report_cbs):
2013
  """Generic job-polling function.
2014

2015
  @type job_id: number
2016
  @param job_id: Job ID
2017
  @type cbs: Instance of L{JobPollCbBase}
2018
  @param cbs: Data callbacks
2019
  @type report_cbs: Instance of L{JobPollReportCbBase}
2020
  @param report_cbs: Reporting callbacks
2021

2022
  """
2023
  prev_job_info = None
2024
  prev_logmsg_serial = None
2025

    
2026
  status = None
2027

    
2028
  while True:
2029
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2030
                                      prev_logmsg_serial)
2031
    if not result:
2032
      # job not found, go away!
2033
      raise errors.JobLost("Job with id %s lost" % job_id)
2034

    
2035
    if result == constants.JOB_NOTCHANGED:
2036
      report_cbs.ReportNotChanged(job_id, status)
2037

    
2038
      # Wait again
2039
      continue
2040

    
2041
    # Split result, a tuple of (field values, log entries)
2042
    (job_info, log_entries) = result
2043
    (status, ) = job_info
2044

    
2045
    if log_entries:
2046
      for log_entry in log_entries:
2047
        (serial, timestamp, log_type, message) = log_entry
2048
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2049
                                    log_type, message)
2050
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2051

    
2052
    # TODO: Handle canceled and archived jobs
2053
    elif status in (constants.JOB_STATUS_SUCCESS,
2054
                    constants.JOB_STATUS_ERROR,
2055
                    constants.JOB_STATUS_CANCELING,
2056
                    constants.JOB_STATUS_CANCELED):
2057
      break
2058

    
2059
    prev_job_info = job_info
2060

    
2061
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2062
  if not jobs:
2063
    raise errors.JobLost("Job with id %s lost" % job_id)
2064

    
2065
  status, opstatus, result = jobs[0]
2066

    
2067
  if status == constants.JOB_STATUS_SUCCESS:
2068
    return result
2069

    
2070
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2071
    raise errors.OpExecError("Job was canceled")
2072

    
2073
  has_ok = False
2074
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2075
    if status == constants.OP_STATUS_SUCCESS:
2076
      has_ok = True
2077
    elif status == constants.OP_STATUS_ERROR:
2078
      errors.MaybeRaise(msg)
2079

    
2080
      if has_ok:
2081
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2082
                                 (idx, msg))
2083

    
2084
      raise errors.OpExecError(str(msg))
2085

    
2086
  # default failure mode
2087
  raise errors.OpExecError(result)
2088

    
2089

    
2090
class JobPollCbBase:
2091
  """Base class for L{GenericPollJob} callbacks.
2092

2093
  """
2094
  def __init__(self):
2095
    """Initializes this class.
2096

2097
    """
2098

    
2099
  def WaitForJobChangeOnce(self, job_id, fields,
2100
                           prev_job_info, prev_log_serial):
2101
    """Waits for changes on a job.
2102

2103
    """
2104
    raise NotImplementedError()
2105

    
2106
  def QueryJobs(self, job_ids, fields):
2107
    """Returns the selected fields for the selected job IDs.
2108

2109
    @type job_ids: list of numbers
2110
    @param job_ids: Job IDs
2111
    @type fields: list of strings
2112
    @param fields: Fields
2113

2114
    """
2115
    raise NotImplementedError()
2116

    
2117

    
2118
class JobPollReportCbBase:
2119
  """Base class for L{GenericPollJob} reporting callbacks.
2120

2121
  """
2122
  def __init__(self):
2123
    """Initializes this class.
2124

2125
    """
2126

    
2127
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2128
    """Handles a log message.
2129

2130
    """
2131
    raise NotImplementedError()
2132

    
2133
  def ReportNotChanged(self, job_id, status):
2134
    """Called for if a job hasn't changed in a while.
2135

2136
    @type job_id: number
2137
    @param job_id: Job ID
2138
    @type status: string or None
2139
    @param status: Job status if available
2140

2141
    """
2142
    raise NotImplementedError()
2143

    
2144

    
2145
class _LuxiJobPollCb(JobPollCbBase):
2146
  def __init__(self, cl):
2147
    """Initializes this class.
2148

2149
    """
2150
    JobPollCbBase.__init__(self)
2151
    self.cl = cl
2152

    
2153
  def WaitForJobChangeOnce(self, job_id, fields,
2154
                           prev_job_info, prev_log_serial):
2155
    """Waits for changes on a job.
2156

2157
    """
2158
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2159
                                        prev_job_info, prev_log_serial)
2160

    
2161
  def QueryJobs(self, job_ids, fields):
2162
    """Returns the selected fields for the selected job IDs.
2163

2164
    """
2165
    return self.cl.QueryJobs(job_ids, fields)
2166

    
2167

    
2168
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2169
  def __init__(self, feedback_fn):
2170
    """Initializes this class.
2171

2172
    """
2173
    JobPollReportCbBase.__init__(self)
2174

    
2175
    self.feedback_fn = feedback_fn
2176

    
2177
    assert callable(feedback_fn)
2178

    
2179
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2180
    """Handles a log message.
2181

2182
    """
2183
    self.feedback_fn((timestamp, log_type, log_msg))
2184

    
2185
  def ReportNotChanged(self, job_id, status):
2186
    """Called if a job hasn't changed in a while.
2187

2188
    """
2189
    # Ignore
2190

    
2191

    
2192
class StdioJobPollReportCb(JobPollReportCbBase):
2193
  def __init__(self):
2194
    """Initializes this class.
2195

2196
    """
2197
    JobPollReportCbBase.__init__(self)
2198

    
2199
    self.notified_queued = False
2200
    self.notified_waitlock = False
2201

    
2202
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2203
    """Handles a log message.
2204

2205
    """
2206
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2207
             FormatLogMessage(log_type, log_msg))
2208

    
2209
  def ReportNotChanged(self, job_id, status):
2210
    """Called if a job hasn't changed in a while.
2211

2212
    """
2213
    if status is None:
2214
      return
2215

    
2216
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2217
      ToStderr("Job %s is waiting in queue", job_id)
2218
      self.notified_queued = True
2219

    
2220
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2221
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2222
      self.notified_waitlock = True
2223

    
2224

    
2225
def FormatLogMessage(log_type, log_msg):
2226
  """Formats a job message according to its type.
2227

2228
  """
2229
  if log_type != constants.ELOG_MESSAGE:
2230
    log_msg = str(log_msg)
2231

    
2232
  return utils.SafeEncode(log_msg)
2233

    
2234

    
2235
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2236
  """Function to poll for the result of a job.
2237

2238
  @type job_id: job identified
2239
  @param job_id: the job to poll for results
2240
  @type cl: luxi.Client
2241
  @param cl: the luxi client to use for communicating with the master;
2242
             if None, a new client will be created
2243

2244
  """
2245
  if cl is None:
2246
    cl = GetClient()
2247

    
2248
  if reporter is None:
2249
    if feedback_fn:
2250
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2251
    else:
2252
      reporter = StdioJobPollReportCb()
2253
  elif feedback_fn:
2254
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2255

    
2256
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2257

    
2258

    
2259
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2260
  """Legacy function to submit an opcode.
2261

2262
  This is just a simple wrapper over the construction of the processor
2263
  instance. It should be extended to better handle feedback and
2264
  interaction functions.
2265

2266
  """
2267
  if cl is None:
2268
    cl = GetClient()
2269

    
2270
  SetGenericOpcodeOpts([op], opts)
2271

    
2272
  job_id = SendJob([op], cl=cl)
2273
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2274
    ToStdout("%d" % job_id)
2275

    
2276
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2277
                       reporter=reporter)
2278

    
2279
  return op_results[0]
2280

    
2281

    
2282
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2283
  """Wrapper around SubmitOpCode or SendJob.
2284

2285
  This function will decide, based on the 'opts' parameter, whether to
2286
  submit and wait for the result of the opcode (and return it), or
2287
  whether to just send the job and print its identifier. It is used in
2288
  order to simplify the implementation of the '--submit' option.
2289

2290
  It will also process the opcodes if we're sending the via SendJob
2291
  (otherwise SubmitOpCode does it).
2292

2293
  """
2294
  if opts and opts.submit_only:
2295
    job = [op]
2296
    SetGenericOpcodeOpts(job, opts)
2297
    job_id = SendJob(job, cl=cl)
2298
    if opts.print_jobid:
2299
      ToStdout("%d" % job_id)
2300
    raise JobSubmittedException(job_id)
2301
  else:
2302
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2303

    
2304

    
2305
def _InitReasonTrail(op, opts):
2306
  """Builds the first part of the reason trail
2307

2308
  Builds the initial part of the reason trail, adding the user provided reason
2309
  (if it exists) and the name of the command starting the operation.
2310

2311
  @param op: the opcode the reason trail will be added to
2312
  @param opts: the command line options selected by the user
2313

2314
  """
2315
  assert len(sys.argv) >= 2
2316
  trail = []
2317

    
2318
  if opts.reason:
2319
    trail.append((constants.OPCODE_REASON_SRC_USER,
2320
                  opts.reason,
2321
                  utils.EpochNano()))
2322

    
2323
  binary = os.path.basename(sys.argv[0])
2324
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2325
  command = sys.argv[1]
2326
  trail.append((source, command, utils.EpochNano()))
2327
  op.reason = trail
2328

    
2329

    
2330
def SetGenericOpcodeOpts(opcode_list, options):
2331
  """Processor for generic options.
2332

2333
  This function updates the given opcodes based on generic command
2334
  line options (like debug, dry-run, etc.).
2335

2336
  @param opcode_list: list of opcodes
2337
  @param options: command line options or None
2338
  @return: None (in-place modification)
2339

2340
  """
2341
  if not options:
2342
    return
2343
  for op in opcode_list:
2344
    op.debug_level = options.debug
2345
    if hasattr(options, "dry_run"):
2346
      op.dry_run = options.dry_run
2347
    if getattr(options, "priority", None) is not None:
2348
      op.priority = options.priority
2349
    _InitReasonTrail(op, options)
2350

    
2351

    
2352
def GetClient(query=False):
2353
  """Connects to the a luxi socket and returns a client.
2354

2355
  @type query: boolean
2356
  @param query: this signifies that the client will only be
2357
      used for queries; if the build-time parameter
2358
      enable-split-queries is enabled, then the client will be
2359
      connected to the query socket instead of the masterd socket
2360

2361
  """
2362
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2363
  if override_socket:
2364
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2365
      address = pathutils.MASTER_SOCKET
2366
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2367
      address = pathutils.QUERY_SOCKET
2368
    else:
2369
      address = override_socket
2370
  elif query and constants.ENABLE_SPLIT_QUERY:
2371
    address = pathutils.QUERY_SOCKET
2372
  else:
2373
    address = None
2374
  # TODO: Cache object?
2375
  try:
2376
    client = luxi.Client(address=address)
2377
  except luxi.NoMasterError:
2378
    ss = ssconf.SimpleStore()
2379

    
2380
    # Try to read ssconf file
2381
    try:
2382
      ss.GetMasterNode()
2383
    except errors.ConfigurationError:
2384
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2385
                                 " not part of a cluster",
2386
                                 errors.ECODE_INVAL)
2387

    
2388
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2389
    if master != myself:
2390
      raise errors.OpPrereqError("This is not the master node, please connect"
2391
                                 " to node '%s' and rerun the command" %
2392
                                 master, errors.ECODE_INVAL)
2393
    raise
2394
  return client
2395

    
2396

    
2397
def FormatError(err):
2398
  """Return a formatted error message for a given error.
2399

2400
  This function takes an exception instance and returns a tuple
2401
  consisting of two values: first, the recommended exit code, and
2402
  second, a string describing the error message (not
2403
  newline-terminated).
2404

2405
  """
2406
  retcode = 1
2407
  obuf = StringIO()
2408
  msg = str(err)
2409
  if isinstance(err, errors.ConfigurationError):
2410
    txt = "Corrupt configuration file: %s" % msg
2411
    logging.error(txt)
2412
    obuf.write(txt + "\n")
2413
    obuf.write("Aborting.")
2414
    retcode = 2
2415
  elif isinstance(err, errors.HooksAbort):
2416
    obuf.write("Failure: hooks execution failed:\n")
2417
    for node, script, out in err.args[0]:
2418
      if out:
2419
        obuf.write("  node: %s, script: %s, output: %s\n" %
2420
                   (node, script, out))
2421
      else:
2422
        obuf.write("  node: %s, script: %s (no output)\n" %
2423
                   (node, script))
2424
  elif isinstance(err, errors.HooksFailure):
2425
    obuf.write("Failure: hooks general failure: %s" % msg)
2426
  elif isinstance(err, errors.ResolverError):
2427
    this_host = netutils.Hostname.GetSysName()
2428
    if err.args[0] == this_host:
2429
      msg = "Failure: can't resolve my own hostname ('%s')"
2430
    else:
2431
      msg = "Failure: can't resolve hostname '%s'"
2432
    obuf.write(msg % err.args[0])
2433
  elif isinstance(err, errors.OpPrereqError):
2434
    if len(err.args) == 2:
2435
      obuf.write("Failure: prerequisites not met for this"
2436
                 " operation:\nerror type: %s, error details:\n%s" %
2437
                 (err.args[1], err.args[0]))
2438
    else:
2439
      obuf.write("Failure: prerequisites not met for this"
2440
                 " operation:\n%s" % msg)
2441
  elif isinstance(err, errors.OpExecError):
2442
    obuf.write("Failure: command execution error:\n%s" % msg)
2443
  elif isinstance(err, errors.TagError):
2444
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2445
  elif isinstance(err, errors.JobQueueDrainError):
2446
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2447
               " accept new requests\n")
2448
  elif isinstance(err, errors.JobQueueFull):
2449
    obuf.write("Failure: the job queue is full and doesn't accept new"
2450
               " job submissions until old jobs are archived\n")
2451
  elif isinstance(err, errors.TypeEnforcementError):
2452
    obuf.write("Parameter Error: %s" % msg)
2453
  elif isinstance(err, errors.ParameterError):
2454
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2455
  elif isinstance(err, luxi.NoMasterError):
2456
    if err.args[0] == pathutils.MASTER_SOCKET:
2457
      daemon = "the master daemon"
2458
    elif err.args[0] == pathutils.QUERY_SOCKET:
2459
      daemon = "the config daemon"
2460
    else:
2461
      daemon = "socket '%s'" % str(err.args[0])
2462
    obuf.write("Cannot communicate with %s.\nIs the process running"
2463
               " and listening for connections?" % daemon)
2464
  elif isinstance(err, luxi.TimeoutError):
2465
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2466
               " been submitted and will continue to run even if the call"
2467
               " timed out. Useful commands in this situation are \"gnt-job"
2468
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2469
    obuf.write(msg)
2470
  elif isinstance(err, luxi.PermissionError):
2471
    obuf.write("It seems you don't have permissions to connect to the"
2472
               " master daemon.\nPlease retry as a different user.")
2473
  elif isinstance(err, luxi.ProtocolError):
2474
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2475
               "%s" % msg)
2476
  elif isinstance(err, errors.JobLost):
2477
    obuf.write("Error checking job status: %s" % msg)
2478
  elif isinstance(err, errors.QueryFilterParseError):
2479
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2480
    obuf.write("\n".join(err.GetDetails()))
2481
  elif isinstance(err, errors.GenericError):
2482
    obuf.write("Unhandled Ganeti error: %s" % msg)
2483
  elif isinstance(err, JobSubmittedException):
2484
    obuf.write("JobID: %s\n" % err.args[0])
2485
    retcode = 0
2486
  else:
2487
    obuf.write("Unhandled exception: %s" % msg)
2488
  return retcode, obuf.getvalue().rstrip("\n")
2489

    
2490

    
2491
def GenericMain(commands, override=None, aliases=None,
2492
                env_override=frozenset()):
2493
  """Generic main function for all the gnt-* commands.
2494

2495
  @param commands: a dictionary with a special structure, see the design doc
2496
                   for command line handling.
2497
  @param override: if not None, we expect a dictionary with keys that will
2498
                   override command line options; this can be used to pass
2499
                   options from the scripts to generic functions
2500
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
2501
  @param env_override: list of environment names which are allowed to submit
2502
                       default args for commands
2503

2504
  """
2505
  # save the program name and the entire command line for later logging
2506
  if sys.argv:
2507
    binary = os.path.basename(sys.argv[0])
2508
    if not binary:
2509
      binary = sys.argv[0]
2510

    
2511
    if len(sys.argv) >= 2:
2512
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2513
    else:
2514
      logname = binary
2515

    
2516
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2517
  else:
2518
    binary = "<unknown program>"
2519
    cmdline = "<unknown>"
2520

    
2521
  if aliases is None:
2522
    aliases = {}
2523

    
2524
  try:
2525
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2526
                                       env_override)
2527
  except _ShowVersion:
2528
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2529
             constants.RELEASE_VERSION)
2530
    return constants.EXIT_SUCCESS
2531
  except _ShowUsage, err:
2532
    for line in _FormatUsage(binary, commands):
2533
      ToStdout(line)
2534

    
2535
    if err.exit_error:
2536
      return constants.EXIT_FAILURE
2537
    else:
2538
      return constants.EXIT_SUCCESS
2539
  except errors.ParameterError, err:
2540
    result, err_msg = FormatError(err)
2541
    ToStderr(err_msg)
2542
    return 1
2543

    
2544
  if func is None: # parse error
2545
    return 1
2546

    
2547
  if override is not None:
2548
    for key, val in override.iteritems():
2549
      setattr(options, key, val)
2550

    
2551
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2552
                     stderr_logging=True)
2553

    
2554
  logging.info("Command line: %s", cmdline)
2555

    
2556
  try:
2557
    result = func(options, args)
2558
  except (errors.GenericError, luxi.ProtocolError,
2559
          JobSubmittedException), err:
2560
    result, err_msg = FormatError(err)
2561
    logging.exception("Error during command processing")
2562
    ToStderr(err_msg)
2563
  except KeyboardInterrupt:
2564
    result = constants.EXIT_FAILURE
2565
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2566
             " might have been submitted and"
2567
             " will continue to run in the background.")
2568
  except IOError, err:
2569
    if err.errno == errno.EPIPE:
2570
      # our terminal went away, we'll exit
2571
      sys.exit(constants.EXIT_FAILURE)
2572
    else:
2573
      raise
2574

    
2575
  return result
2576

    
2577

    
2578
def ParseNicOption(optvalue):
2579
  """Parses the value of the --net option(s).
2580

2581
  """
2582
  try:
2583
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2584
  except (TypeError, ValueError), err:
2585
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2586
                               errors.ECODE_INVAL)
2587

    
2588
  nics = [{}] * nic_max
2589
  for nidx, ndict in optvalue:
2590
    nidx = int(nidx)
2591

    
2592
    if not isinstance(ndict, dict):
2593
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2594
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2595

    
2596
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2597

    
2598
    nics[nidx] = ndict
2599

    
2600
  return nics
2601

    
2602

    
2603
def GenericInstanceCreate(mode, opts, args):
2604
  """Add an instance to the cluster via either creation or import.
2605

2606
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2607
  @param opts: the command line options selected by the user
2608
  @type args: list
2609
  @param args: should contain only one element, the new instance name
2610
  @rtype: int
2611
  @return: the desired exit code
2612

2613
  """
2614
  instance = args[0]
2615

    
2616
  (pnode, snode) = SplitNodeOption(opts.node)
2617

    
2618
  hypervisor = None
2619
  hvparams = {}
2620
  if opts.hypervisor:
2621
    hypervisor, hvparams = opts.hypervisor
2622

    
2623
  if opts.nics:
2624
    nics = ParseNicOption(opts.nics)
2625
  elif opts.no_nics:
2626
    # no nics
2627
    nics = []
2628
  elif mode == constants.INSTANCE_CREATE:
2629
    # default of one nic, all auto
2630
    nics = [{}]
2631
  else:
2632
    # mode == import
2633
    nics = []
2634

    
2635
  if opts.disk_template == constants.DT_DISKLESS:
2636
    if opts.disks or opts.sd_size is not None:
2637
      raise errors.OpPrereqError("Diskless instance but disk"
2638
                                 " information passed", errors.ECODE_INVAL)
2639
    disks = []
2640
  else:
2641
    if (not opts.disks and not opts.sd_size
2642
        and mode == constants.INSTANCE_CREATE):
2643
      raise errors.OpPrereqError("No disk information specified",
2644
                                 errors.ECODE_INVAL)
2645
    if opts.disks and opts.sd_size is not None:
2646
      raise errors.OpPrereqError("Please use either the '--disk' or"
2647
                                 " '-s' option", errors.ECODE_INVAL)
2648
    if opts.sd_size is not None:
2649
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2650

    
2651
    if opts.disks:
2652
      try:
2653
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2654
      except ValueError, err:
2655
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2656
                                   errors.ECODE_INVAL)
2657
      disks = [{}] * disk_max
2658
    else:
2659
      disks = []
2660
    for didx, ddict in opts.disks:
2661
      didx = int(didx)
2662
      if not isinstance(ddict, dict):
2663
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2664
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2665
      elif constants.IDISK_SIZE in ddict:
2666
        if constants.IDISK_ADOPT in ddict:
2667
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2668
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2669
        try:
2670
          ddict[constants.IDISK_SIZE] = \
2671
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2672
        except ValueError, err:
2673
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2674
                                     (didx, err), errors.ECODE_INVAL)
2675
      elif constants.IDISK_ADOPT in ddict:
2676
        if constants.IDISK_SPINDLES in ddict:
2677
          raise errors.OpPrereqError("spindles is not a valid option when"
2678
                                     " adopting a disk", errors.ECODE_INVAL)
2679
        if mode == constants.INSTANCE_IMPORT:
2680
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2681
                                     " import", errors.ECODE_INVAL)
2682
        ddict[constants.IDISK_SIZE] = 0
2683
      else:
2684
        raise errors.OpPrereqError("Missing size or adoption source for"
2685
                                   " disk %d" % didx, errors.ECODE_INVAL)
2686
      disks[didx] = ddict
2687

    
2688
  if opts.tags is not None:
2689
    tags = opts.tags.split(",")
2690
  else:
2691
    tags = []
2692

    
2693
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2694
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2695

    
2696
  if mode == constants.INSTANCE_CREATE:
2697
    start = opts.start
2698
    os_type = opts.os
2699
    force_variant = opts.force_variant
2700
    src_node = None
2701
    src_path = None
2702
    no_install = opts.no_install
2703
    identify_defaults = False
2704
  elif mode == constants.INSTANCE_IMPORT:
2705
    start = False
2706
    os_type = None
2707
    force_variant = False
2708
    src_node = opts.src_node
2709
    src_path = opts.src_dir
2710
    no_install = None
2711
    identify_defaults = opts.identify_defaults
2712
  else:
2713
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2714

    
2715
  op = opcodes.OpInstanceCreate(instance_name=instance,
2716
                                disks=disks,
2717
                                disk_template=opts.disk_template,
2718
                                nics=nics,
2719
                                conflicts_check=opts.conflicts_check,
2720
                                pnode=pnode, snode=snode,
2721
                                ip_check=opts.ip_check,
2722
                                name_check=opts.name_check,
2723
                                wait_for_sync=opts.wait_for_sync,
2724
                                file_storage_dir=opts.file_storage_dir,
2725
                                file_driver=opts.file_driver,
2726
                                iallocator=opts.iallocator,
2727
                                hypervisor=hypervisor,
2728
                                hvparams=hvparams,
2729
                                beparams=opts.beparams,
2730
                                osparams=opts.osparams,
2731
                                mode=mode,
2732
                                start=start,
2733
                                os_type=os_type,
2734
                                force_variant=force_variant,
2735
                                src_node=src_node,
2736
                                src_path=src_path,
2737
                                tags=tags,
2738
                                no_install=no_install,
2739
                                identify_defaults=identify_defaults,
2740
                                ignore_ipolicy=opts.ignore_ipolicy)
2741

    
2742
  SubmitOrSend(op, opts)
2743
  return 0
2744

    
2745

    
2746
class _RunWhileClusterStoppedHelper:
2747
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2748

2749
  """
2750
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2751
    """Initializes this class.
2752

2753
    @type feedback_fn: callable
2754
    @param feedback_fn: Feedback function
2755
    @type cluster_name: string
2756
    @param cluster_name: Cluster name
2757
    @type master_node: string
2758
    @param master_node Master node name
2759
    @type online_nodes: list
2760
    @param online_nodes: List of names of online nodes
2761

2762
    """
2763
    self.feedback_fn = feedback_fn
2764
    self.cluster_name = cluster_name
2765
    self.master_node = master_node
2766
    self.online_nodes = online_nodes
2767

    
2768
    self.ssh = ssh.SshRunner(self.cluster_name)
2769

    
2770
    self.nonmaster_nodes = [name for name in online_nodes
2771
                            if name != master_node]
2772

    
2773
    assert self.master_node not in self.nonmaster_nodes
2774

    
2775
  def _RunCmd(self, node_name, cmd):
2776
    """Runs a command on the local or a remote machine.
2777

2778
    @type node_name: string
2779
    @param node_name: Machine name
2780
    @type cmd: list
2781
    @param cmd: Command
2782

2783
    """
2784
    if node_name is None or node_name == self.master_node:
2785
      # No need to use SSH
2786
      result = utils.RunCmd(cmd)
2787
    else:
2788
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2789
                            utils.ShellQuoteArgs(cmd))
2790

    
2791
    if result.failed:
2792
      errmsg = ["Failed to run command %s" % result.cmd]
2793
      if node_name:
2794
        errmsg.append("on node %s" % node_name)
2795
      errmsg.append(": exitcode %s and error %s" %
2796
                    (result.exit_code, result.output))
2797
      raise errors.OpExecError(" ".join(errmsg))
2798

    
2799
  def Call(self, fn, *args):
2800
    """Call function while all daemons are stopped.
2801

2802
    @type fn: callable
2803
    @param fn: Function to be called
2804

2805
    """
2806
    # Pause watcher by acquiring an exclusive lock on watcher state file
2807
    self.feedback_fn("Blocking watcher")
2808
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2809
    try:
2810
      # TODO: Currently, this just blocks. There's no timeout.
2811
      # TODO: Should it be a shared lock?
2812
      watcher_block.Exclusive(blocking=True)
2813

    
2814
      # Stop master daemons, so that no new jobs can come in and all running
2815
      # ones are finished
2816
      self.feedback_fn("Stopping master daemons")
2817
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2818
      try:
2819
        # Stop daemons on all nodes
2820
        for node_name in self.online_nodes:
2821
          self.feedback_fn("Stopping daemons on %s" % node_name)
2822
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2823

    
2824
        # All daemons are shut down now
2825
        try:
2826
          return fn(self, *args)
2827
        except Exception, err:
2828
          _, errmsg = FormatError(err)
2829
          logging.exception("Caught exception")
2830
          self.feedback_fn(errmsg)
2831
          raise
2832
      finally:
2833
        # Start cluster again, master node last
2834
        for node_name in self.nonmaster_nodes + [self.master_node]:
2835
          self.feedback_fn("Starting daemons on %s" % node_name)
2836
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2837
    finally:
2838
      # Resume watcher
2839
      watcher_block.Close()
2840

    
2841

    
2842
def RunWhileClusterStopped(feedback_fn, fn, *args):
2843
  """Calls a function while all cluster daemons are stopped.
2844

2845
  @type feedback_fn: callable
2846
  @param feedback_fn: Feedback function
2847
  @type fn: callable
2848
  @param fn: Function to be called when daemons are stopped
2849

2850
  """
2851
  feedback_fn("Gathering cluster information")
2852

    
2853
  # This ensures we're running on the master daemon
2854
  cl = GetClient()
2855

    
2856
  (cluster_name, master_node) = \
2857
    cl.QueryConfigValues(["cluster_name", "master_node"])
2858

    
2859
  online_nodes = GetOnlineNodes([], cl=cl)
2860

    
2861
  # Don't keep a reference to the client. The master daemon will go away.
2862
  del cl
2863

    
2864
  assert master_node in online_nodes
2865

    
2866
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2867
                                       online_nodes).Call(fn, *args)
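
# Illustrative sketch, not part of the original module: how a caller might use
# RunWhileClusterStopped. The callback receives the helper object as its first
# argument; this needs a live cluster, so the function below is never called
# here and only documents the calling convention.
def _ExampleRunWhileClusterStopped():
  def _Callback(helper):
    # 'helper' is the _RunWhileClusterStoppedHelper instance; at this point
    # all daemons on all online nodes have been stopped
    helper.feedback_fn("Cluster is quiescent, doing offline work")

  return RunWhileClusterStopped(ToStdout, _Callback)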
2868

    
2869

    
2870
def GenerateTable(headers, fields, separator, data,
2871
                  numfields=None, unitfields=None,
2872
                  units=None):
2873
  """Prints a table with headers and different fields.
2874

2875
  @type headers: dict
2876
  @param headers: dictionary mapping field names to headers for
2877
      the table
2878
  @type fields: list
2879
  @param fields: the field names corresponding to each row in
2880
      the data field
2881
  @param separator: the separator to be used; if this is None,
2882
      the default 'smart' algorithm is used which computes optimal
2883
      field width, otherwise just the separator is used between
2884
      each field
2885
  @type data: list
2886
  @param data: a list of lists, each sublist being one row to be output
2887
  @type numfields: list
2888
  @param numfields: a list with the fields that hold numeric
2889
      values and thus should be right-aligned
2890
  @type unitfields: list
2891
  @param unitfields: a list with the fields that hold numeric
2892
      values that should be formatted with the units field
2893
  @type units: string or None
2894
  @param units: the units we should use for formatting, or None for
2895
      automatic choice (human-readable for non-separator usage, otherwise
2896
      megabytes); this is a one-letter string
2897

2898
  """
2899
  if units is None:
2900
    if separator:
2901
      units = "m"
2902
    else:
2903
      units = "h"
2904

    
2905
  if numfields is None:
2906
    numfields = []
2907
  if unitfields is None:
2908
    unitfields = []
2909

    
2910
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2911
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2912

    
2913
  format_fields = []
2914
  for field in fields:
2915
    if headers and field not in headers:
2916
      # TODO: handle unknown fields better (either revert to old
2917
      # style of raising exception, or deal more intelligently with
2918
      # variable fields)
2919
      headers[field] = field
2920
    if separator is not None:
2921
      format_fields.append("%s")
2922
    elif numfields.Matches(field):
2923
      format_fields.append("%*s")
2924
    else:
2925
      format_fields.append("%-*s")
2926

    
2927
  if separator is None:
2928
    mlens = [0 for name in fields]
2929
    format_str = " ".join(format_fields)
2930
  else:
2931
    format_str = separator.replace("%", "%%").join(format_fields)
2932

    
2933
  for row in data:
2934
    if row is None:
2935
      continue
2936
    for idx, val in enumerate(row):
2937
      if unitfields.Matches(fields[idx]):
2938
        try:
2939
          val = int(val)
2940
        except (TypeError, ValueError):
2941
          pass
2942
        else:
2943
          val = row[idx] = utils.FormatUnit(val, units)
2944
      val = row[idx] = str(val)
2945
      if separator is None:
2946
        mlens[idx] = max(mlens[idx], len(val))
2947

    
2948
  result = []
2949
  if headers:
2950
    args = []
2951
    for idx, name in enumerate(fields):
2952
      hdr = headers[name]
2953
      if separator is None:
2954
        mlens[idx] = max(mlens[idx], len(hdr))
2955
        args.append(mlens[idx])
2956
      args.append(hdr)
2957
    result.append(format_str % tuple(args))
2958

    
2959
  if separator is None:
2960
    assert len(mlens) == len(fields)
2961

    
2962
    if fields and not numfields.Matches(fields[-1]):
2963
      mlens[-1] = 0
2964

    
2965
  for line in data:
2966
    args = []
2967
    if line is None:
2968
      line = ["-" for _ in fields]
2969
    for idx in range(len(fields)):
2970
      if separator is None:
2971
        args.append(mlens[idx])
2972
      args.append(line[idx])
2973
    result.append(format_str % tuple(args))
2974

    
2975
  return result
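
# Illustrative sketch, not part of the original module: a minimal
# GenerateTable call. The node names and the "free" field are made-up example
# data, not real query output.
def _ExampleGenerateTable():
  headers = {"name": "Node", "free": "Free"}
  data = [["node1.example.com", "1024"], ["node2.example.com", "512"]]
  lines = GenerateTable(headers, ["name", "free"], None, data,
                        numfields=["free"])
  # One line for the header row plus one per data row; "free" is
  # right-aligned because it was listed in numfields
  assert len(lines) == 1 + len(data)
  return lines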
2976

    
2977

    
2978
def _FormatBool(value):
2979
  """Formats a boolean value as a string.
2980

2981
  """
2982
  if value:
2983
    return "Y"
2984
  return "N"
2985

    
2986

    
2987
#: Default formatting for query results; (callback, align right)
2988
_DEFAULT_FORMAT_QUERY = {
2989
  constants.QFT_TEXT: (str, False),
2990
  constants.QFT_BOOL: (_FormatBool, False),
2991
  constants.QFT_NUMBER: (str, True),
2992
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2993
  constants.QFT_OTHER: (str, False),
2994
  constants.QFT_UNKNOWN: (str, False),
2995
  }
2996

    
2997

    
2998
def _GetColumnFormatter(fdef, override, unit):
2999
  """Returns formatting function for a field.
3000

3001
  @type fdef: L{objects.QueryFieldDefinition}
3002
  @type override: dict
3003
  @param override: Dictionary for overriding field formatting functions,
3004
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3005
  @type unit: string
3006
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3007
  @rtype: tuple; (callable, bool)
3008
  @return: Returns the function to format a value (takes one parameter) and a
3009
    boolean for aligning the value on the right-hand side
3010

3011
  """
3012
  fmt = override.get(fdef.name, None)
3013
  if fmt is not None:
3014
    return fmt
3015

    
3016
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3017

    
3018
  if fdef.kind == constants.QFT_UNIT:
3019
    # Can't keep this information in the static dictionary
3020
    return (lambda value: utils.FormatUnit(value, unit), True)
3021

    
3022
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3023
  if fmt is not None:
3024
    return fmt
3025

    
3026
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3027

    
3028

    
3029
class _QueryColumnFormatter:
3030
  """Callable class for formatting fields of a query.
3031

3032
  """
3033
  def __init__(self, fn, status_fn, verbose):
3034
    """Initializes this class.
3035

3036
    @type fn: callable
3037
    @param fn: Formatting function
3038
    @type status_fn: callable
3039
    @param status_fn: Function to report fields' status
3040
    @type verbose: boolean
3041
    @param verbose: whether to use verbose field descriptions or not
3042

3043
    """
3044
    self._fn = fn
3045
    self._status_fn = status_fn
3046
    self._verbose = verbose
3047

    
3048
  def __call__(self, data):
3049
    """Returns a field's string representation.
3050

3051
    """
3052
    (status, value) = data
3053

    
3054
    # Report status
3055
    self._status_fn(status)
3056

    
3057
    if status == constants.RS_NORMAL:
3058
      return self._fn(value)
3059

    
3060
    assert value is None, \
3061
           "Found value %r for abnormal status %s" % (value, status)
3062

    
3063
    return FormatResultError(status, self._verbose)
3064

    
3065

    
3066
def FormatResultError(status, verbose):
3067
  """Formats result status other than L{constants.RS_NORMAL}.
3068

3069
  @param status: The result status
3070
  @type verbose: boolean
3071
  @param verbose: Whether to return the verbose text
3072
  @return: Text of result status
3073

3074
  """
3075
  assert status != constants.RS_NORMAL, \
3076
         "FormatResultError called with status equal to constants.RS_NORMAL"
3077
  try:
3078
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3079
  except KeyError:
3080
    raise NotImplementedError("Unknown status %s" % status)
3081
  else:
3082
    if verbose:
3083
      return verbose_text
3084
    return normal_text
3085

    
3086

    
3087
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3088
                      header=False, verbose=False):
3089
  """Formats data in L{objects.QueryResponse}.
3090

3091
  @type result: L{objects.QueryResponse}
3092
  @param result: result of query operation
3093
  @type unit: string
3094
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3095
    see L{utils.text.FormatUnit}
3096
  @type format_override: dict
3097
  @param format_override: Dictionary for overriding field formatting functions,
3098
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3099
  @type separator: string or None
3100
  @param separator: String used to separate fields
3101
  @type header: bool
3102
  @param header: Whether to output header row
3103
  @type verbose: boolean
3104
  @param verbose: whether to use verbose field descriptions or not
3105

3106
  """
3107
  if unit is None:
3108
    if separator:
3109
      unit = "m"
3110
    else:
3111
      unit = "h"
3112

    
3113
  if format_override is None:
3114
    format_override = {}
3115

    
3116
  stats = dict.fromkeys(constants.RS_ALL, 0)
3117

    
3118
  def _RecordStatus(status):
3119
    if status in stats:
3120
      stats[status] += 1
3121

    
3122
  columns = []
3123
  for fdef in result.fields:
3124
    assert fdef.title and fdef.name
3125
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3126
    columns.append(TableColumn(fdef.title,
3127
                               _QueryColumnFormatter(fn, _RecordStatus,
3128
                                                     verbose),
3129
                               align_right))
3130

    
3131
  table = FormatTable(result.data, columns, header, separator)
3132

    
3133
  # Collect statistics
3134
  assert len(stats) == len(constants.RS_ALL)
3135
  assert compat.all(count >= 0 for count in stats.values())
3136

    
3137
  # Determine overall status. If there was no data, unknown fields must be
3138
  # detected via the field definitions.
3139
  if (stats[constants.RS_UNKNOWN] or
3140
      (not result.data and _GetUnknownFields(result.fields))):
3141
    status = QR_UNKNOWN
3142
  elif compat.any(count > 0 for key, count in stats.items()
3143
                  if key != constants.RS_NORMAL):
3144
    status = QR_INCOMPLETE
3145
  else:
3146
    status = QR_NORMAL
3147

    
3148
  return (status, table)
3149

    
3150

    
3151
def _GetUnknownFields(fdefs):
3152
  """Returns list of unknown fields included in C{fdefs}.
3153

3154
  @type fdefs: list of L{objects.QueryFieldDefinition}
3155

3156
  """
3157
  return [fdef for fdef in fdefs
3158
          if fdef.kind == constants.QFT_UNKNOWN]
3159

    
3160

    
3161
def _WarnUnknownFields(fdefs):
3162
  """Prints a warning to stderr if a query included unknown fields.
3163

3164
  @type fdefs: list of L{objects.QueryFieldDefinition}
3165

3166
  """
3167
  unknown = _GetUnknownFields(fdefs)
3168
  if unknown:
3169
    ToStderr("Warning: Queried for unknown fields %s",
3170
             utils.CommaJoin(fdef.name for fdef in unknown))
3171
    return True
3172

    
3173
  return False
3174

    
3175

    
3176
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3177
                format_override=None, verbose=False, force_filter=False,
3178
                namefield=None, qfilter=None, isnumeric=False):
3179
  """Generic implementation for listing all items of a resource.
3180

3181
  @param resource: One of L{constants.QR_VIA_LUXI}
3182
  @type fields: list of strings
3183
  @param fields: List of fields to query for
3184
  @type names: list of strings
3185
  @param names: Names of items to query for
3186
  @type unit: string or None
3187
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3188
    None for automatic choice (human-readable for non-separator usage,
3189
    otherwise megabytes); this is a one-letter string
3190
  @type separator: string or None
3191
  @param separator: String used to separate fields
3192
  @type header: bool
3193
  @param header: Whether to show header row
3194
  @type force_filter: bool
3195
  @param force_filter: Whether to always treat names as filter
3196
  @type format_override: dict
3197
  @param format_override: Dictionary for overriding field formatting functions,
3198
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3199
  @type verbose: boolean
3200
  @param verbose: whether to use verbose field descriptions or not
3201
  @type namefield: string
3202
  @param namefield: Name of field to use for simple filters (see
3203
    L{qlang.MakeFilter} for details)
3204
  @type qfilter: list or None
3205
  @param qfilter: Query filter (in addition to names)
3206
  @type isnumeric: bool
3207
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3208
    any simple filters built by namefield should use integer values to
3209
    reflect that
3210

3211
  """
3212
  if not names:
3213
    names = None
3214

    
3215
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3216
                                isnumeric=isnumeric)
3217

    
3218
  if qfilter is None:
3219
    qfilter = namefilter
3220
  elif namefilter is not None:
3221
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3222

    
3223
  if cl is None:
3224
    cl = GetClient()
3225

    
3226
  response = cl.Query(resource, fields, qfilter)
3227

    
3228
  found_unknown = _WarnUnknownFields(response.fields)
3229

    
3230
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3231
                                     header=header,
3232
                                     format_override=format_override,
3233
                                     verbose=verbose)
3234

    
3235
  for line in data:
3236
    ToStdout(line)
3237

    
3238
  assert ((found_unknown and status == QR_UNKNOWN) or
3239
          (not found_unknown and status != QR_UNKNOWN))
3240

    
3241
  if status == QR_UNKNOWN:
3242
    return constants.EXIT_UNKNOWN_FIELD
3243

    
3244
  # TODO: Should the list command fail if not all data could be collected?
3245
  return constants.EXIT_SUCCESS
3246

    
3247

    
3248
def _FieldDescValues(fdef):
3249
  """Helper function for L{GenericListFields} to get query field description.
3250

3251
  @type fdef: L{objects.QueryFieldDefinition}
3252
  @rtype: list
3253

3254
  """
3255
  return [
3256
    fdef.name,
3257
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3258
    fdef.title,
3259
    fdef.doc,
3260
    ]
3261

    
3262

    
3263
def GenericListFields(resource, fields, separator, header, cl=None):
3264
  """Generic implementation for listing fields for a resource.
3265

3266
  @param resource: One of L{constants.QR_VIA_LUXI}
3267
  @type fields: list of strings
3268
  @param fields: List of fields to query for
3269
  @type separator: string or None
3270
  @param separator: String used to separate fields
3271
  @type header: bool
3272
  @param header: Whether to show header row
3273

3274
  """
3275
  if cl is None:
3276
    cl = GetClient()
3277

    
3278
  if not fields:
3279
    fields = None
3280

    
3281
  response = cl.QueryFields(resource, fields)
3282

    
3283
  found_unknown = _WarnUnknownFields(response.fields)
3284

    
3285
  columns = [
3286
    TableColumn("Name", str, False),
3287
    TableColumn("Type", str, False),
3288
    TableColumn("Title", str, False),
3289
    TableColumn("Description", str, False),
3290
    ]
3291

    
3292
  rows = map(_FieldDescValues, response.fields)
3293

    
3294
  for line in FormatTable(rows, columns, header, separator):
3295
    ToStdout(line)
3296

    
3297
  if found_unknown:
3298
    return constants.EXIT_UNKNOWN_FIELD
3299

    
3300
  return constants.EXIT_SUCCESS
3301

    
3302

    
3303
class TableColumn:
3304
  """Describes a column for L{FormatTable}.
3305

3306
  """
3307
  def __init__(self, title, fn, align_right):
3308
    """Initializes this class.
3309

3310
    @type title: string
3311
    @param title: Column title
3312
    @type fn: callable
3313
    @param fn: Formatting function
3314
    @type align_right: bool
3315
    @param align_right: Whether to align values on the right-hand side
3316

3317
    """
3318
    self.title = title
3319
    self.format = fn
3320
    self.align_right = align_right
3321

    
3322

    
3323
def _GetColFormatString(width, align_right):
3324
  """Returns the format string for a field.
3325

3326
  """
3327
  if align_right:
3328
    sign = ""
3329
  else:
3330
    sign = "-"
3331

    
3332
  return "%%%s%ss" % (sign, width)
3333

    
3334

    
3335
def FormatTable(rows, columns, header, separator):
3336
  """Formats data as a table.
3337

3338
  @type rows: list of lists
3339
  @param rows: Row data, one list per row
3340
  @type columns: list of L{TableColumn}
3341
  @param columns: Column descriptions
3342
  @type header: bool
3343
  @param header: Whether to show header row
3344
  @type separator: string or None
3345
  @param separator: String used to separate columns
3346

3347
  """
3348
  if header:
3349
    data = [[col.title for col in columns]]
3350
    colwidth = [len(col.title) for col in columns]
3351
  else:
3352
    data = []
3353
    colwidth = [0 for _ in columns]
3354

    
3355
  # Format row data
3356
  for row in rows:
3357
    assert len(row) == len(columns)
3358

    
3359
    formatted = [col.format(value) for value, col in zip(row, columns)]
3360

    
3361
    if separator is None:
3362
      # Update column widths
3363
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3364
        # Modifying a list's items while iterating is fine
3365
        colwidth[idx] = max(oldwidth, len(value))
3366

    
3367
    data.append(formatted)
3368

    
3369
  if separator is not None:
3370
    # Return early if a separator is used
3371
    return [separator.join(row) for row in data]
3372

    
3373
  if columns and not columns[-1].align_right:
3374
    # Avoid unnecessary spaces at end of line
3375
    colwidth[-1] = 0
3376

    
3377
  # Build format string
3378
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3379
                  for col, width in zip(columns, colwidth)])
3380

    
3381
  return [fmt % tuple(row) for row in data]
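
# Illustrative sketch, not part of the original module: FormatTable with an
# explicit separator simply joins the formatted cells; the column titles and
# row values below are made up.
def _ExampleFormatTable():
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Size", str, True),
    ]
  rows = [["disk0", 128], ["disk1", 4096]]
  assert FormatTable(rows, columns, True, "|") == \
    ["Name|Size", "disk0|128", "disk1|4096"]
  # With separator=None the same data would instead be padded to the computed
  # column widths, with "Size" right-aligned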
3382

    
3383

    
3384
def FormatTimestamp(ts):
3385
  """Formats a given timestamp.
3386

3387
  @type ts: timestamp
3388
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3389

3390
  @rtype: string
3391
  @return: a string with the formatted timestamp
3392

3393
  """
3394
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3395
    return "?"
3396

    
3397
  (sec, usecs) = ts
3398
  return utils.FormatTime(sec, usecs=usecs)
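
# Illustrative sketch, not part of the original module: FormatTimestamp only
# accepts (seconds, microseconds) pairs; everything else yields "?". The exact
# text for a valid pair depends on utils.FormatTime and the local timezone.
def _ExampleFormatTimestamp():
  assert FormatTimestamp("garbage") == "?"
  assert FormatTimestamp((123,)) == "?"
  return FormatTimestamp((1234567890, 0))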
3399

    
3400

    
3401
def ParseTimespec(value):
3402
  """Parse a time specification.
3403

3404
  The following suffixes will be recognized:
3405

3406
    - s: seconds
3407
    - m: minutes
3408
    - h: hours
3409
    - d: days
3410
    - w: weeks
3411

3412
  Without any suffix, the value will be taken to be in seconds.
3413

3414
  """
3415
  value = str(value)
3416
  if not value:
3417
    raise errors.OpPrereqError("Empty time specification passed",
3418
                               errors.ECODE_INVAL)
3419
  suffix_map = {
3420
    "s": 1,
3421
    "m": 60,
3422
    "h": 3600,
3423
    "d": 86400,
3424
    "w": 604800,
3425
    }
3426
  if value[-1] not in suffix_map:
3427
    try:
3428
      value = int(value)
3429
    except (TypeError, ValueError):
3430
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3431
                                 errors.ECODE_INVAL)
3432
  else:
3433
    multiplier = suffix_map[value[-1]]
3434
    value = value[:-1]
3435
    if not value: # no data left after stripping the suffix
3436
      raise errors.OpPrereqError("Invalid time specification (only"
3437
                                 " suffix passed)", errors.ECODE_INVAL)
3438
    try:
3439
      value = int(value) * multiplier
3440
    except (TypeError, ValueError):
3441
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3442
                                 errors.ECODE_INVAL)
3443
  return value
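
# Illustrative sketch, not part of the original module: a few ParseTimespec
# conversions following the suffix table above.
def _ExampleParseTimespec():
  assert ParseTimespec("30") == 30        # no suffix, taken as seconds
  assert ParseTimespec("2h") == 2 * 3600  # hours
  assert ParseTimespec("1w") == 604800    # weeks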
3444

    
3445

    
3446
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3447
                   filter_master=False, nodegroup=None):
3448
  """Returns the names of online nodes.
3449

3450
  This function will also log a warning on stderr with the names of
3451
  the offline nodes.
3452

3453
  @param nodes: if not empty, use only this subset of nodes (minus the
3454
      offline ones)
3455
  @param cl: if not None, luxi client to use
3456
  @type nowarn: boolean
3457
  @param nowarn: by default, this function will output a note with the
3458
      offline nodes that are skipped; if this parameter is True the
3459
      note is not displayed
3460
  @type secondary_ips: boolean
3461
  @param secondary_ips: if True, return the secondary IPs instead of the
3462
      names, useful for doing network traffic over the replication interface
3463
      (if any)
3464
  @type filter_master: boolean
3465
  @param filter_master: if True, do not return the master node in the list
3466
      (useful in coordination with secondary_ips where we cannot check our
3467
      node name against the list)
3468
  @type nodegroup: string
3469
  @param nodegroup: If set, only return nodes in this node group
3470

3471
  """
3472
  if cl is None:
3473
    cl = GetClient()
3474

    
3475
  qfilter = []
3476

    
3477
  if nodes:
3478
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3479

    
3480
  if nodegroup is not None:
3481
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3482
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3483

    
3484
  if filter_master:
3485
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3486

    
3487
  if qfilter:
3488
    if len(qfilter) > 1:
3489
      final_filter = [qlang.OP_AND] + qfilter
3490
    else:
3491
      assert len(qfilter) == 1
3492
      final_filter = qfilter[0]
3493
  else:
3494
    final_filter = None
3495

    
3496
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3497

    
3498
  def _IsOffline(row):
3499
    (_, (_, offline), _) = row
3500
    return offline
3501

    
3502
  def _GetName(row):
3503
    ((_, name), _, _) = row
3504
    return name
3505

    
3506
  def _GetSip(row):
3507
    (_, _, (_, sip)) = row
3508
    return sip
3509

    
3510
  (offline, online) = compat.partition(result.data, _IsOffline)
3511

    
3512
  if offline and not nowarn:
3513
    ToStderr("Note: skipping offline node(s): %s" %
3514
             utils.CommaJoin(map(_GetName, offline)))
3515

    
3516
  if secondary_ips:
3517
    fn = _GetSip
3518
  else:
3519
    fn = _GetName
3520

    
3521
  return map(fn, online)
3522

    
3523

    
3524
def _ToStream(stream, txt, *args):
3525
  """Write a message to a stream, bypassing the logging system
3526

3527
  @type stream: file object
3528
  @param stream: the file to which we should write
3529
  @type txt: str
3530
  @param txt: the message
3531

3532
  """
3533
  try:
3534
    if args:
3535
      args = tuple(args)
3536
      stream.write(txt % args)
3537
    else:
3538
      stream.write(txt)
3539
    stream.write("\n")
3540
    stream.flush()
3541
  except IOError, err:
3542
    if err.errno == errno.EPIPE:
3543
      # our terminal went away, we'll exit
3544
      sys.exit(constants.EXIT_FAILURE)
3545
    else:
3546
      raise
3547

    
3548

    
3549
def ToStdout(txt, *args):
3550
  """Write a message to stdout only, bypassing the logging system
3551

3552
  This is just a wrapper over _ToStream.
3553

3554
  @type txt: str
3555
  @param txt: the message
3556

3557
  """
3558
  _ToStream(sys.stdout, txt, *args)
3559

    
3560

    
3561
def ToStderr(txt, *args):
3562
  """Write a message to stderr only, bypassing the logging system
3563

3564
  This is just a wrapper over _ToStream.
3565

3566
  @type txt: str
3567
  @param txt: the message
3568

3569
  """
3570
  _ToStream(sys.stderr, txt, *args)
3571

    
3572

    
3573
class JobExecutor(object):
3574
  """Class which manages the submission and execution of multiple jobs.
3575

3576
  Note that instances of this class should not be reused between
3577
  GetResults() calls.
3578

3579
  """
3580
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3581
    self.queue = []
3582
    if cl is None:
3583
      cl = GetClient()
3584
    self.cl = cl
3585
    self.verbose = verbose
3586
    self.jobs = []
3587
    self.opts = opts
3588
    self.feedback_fn = feedback_fn
3589
    self._counter = itertools.count()
3590

    
3591
  @staticmethod
3592
  def _IfName(name, fmt):
3593
    """Helper function for formatting name.
3594

3595
    """
3596
    if name:
3597
      return fmt % name
3598

    
3599
    return ""
3600

    
3601
  def QueueJob(self, name, *ops):
3602
    """Record a job for later submit.
3603

3604
    @type name: string
3605
    @param name: a description of the job, will be used in WaitJobSet
3606

3607
    """
3608
    SetGenericOpcodeOpts(ops, self.opts)
3609
    self.queue.append((self._counter.next(), name, ops))
3610

    
3611
  def AddJobId(self, name, status, job_id):
3612
    """Adds a job ID to the internal queue.
3613

3614
    """
3615
    self.jobs.append((self._counter.next(), status, job_id, name))
3616

    
3617
  def SubmitPending(self, each=False):
3618
    """Submit all pending jobs.
3619

3620
    """
3621
    if each:
3622
      results = []
3623
      for (_, _, ops) in self.queue:
3624
        # SubmitJob will remove the success status, but raise an exception if
3625
        # the submission fails, so we'll notice that anyway.
3626
        results.append([True, self.cl.SubmitJob(ops)[0]])
3627
    else:
3628
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3629
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3630
      self.jobs.append((idx, status, data, name))
3631

    
3632
  def _ChooseJob(self):
3633
    """Choose a non-waiting/queued job to poll next.
3634

3635
    """
3636
    assert self.jobs, "_ChooseJob called with empty job list"
3637

    
3638
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3639
                               ["status"])
3640
    assert result
3641

    
3642
    for job_data, status in zip(self.jobs, result):
3643
      if (isinstance(status, list) and status and
3644
          status[0] in (constants.JOB_STATUS_QUEUED,
3645
                        constants.JOB_STATUS_WAITING,
3646
                        constants.JOB_STATUS_CANCELING)):
3647
        # job is still present and waiting
3648
        continue
3649
      # good candidate found (either running job or lost job)
3650
      self.jobs.remove(job_data)
3651
      return job_data
3652

    
3653
    # no job found
3654
    return self.jobs.pop(0)
3655

    
3656
  def GetResults(self):
3657
    """Wait for and return the results of all jobs.
3658

3659
    @rtype: list
3660
    @return: list of tuples (success, job results), in the same order
3661
        as the submitted jobs; if a job has failed, instead of the result
3662
        there will be the error message
3663

3664
    """
3665
    if not self.jobs:
3666
      self.SubmitPending()
3667
    results = []
3668
    if self.verbose:
3669
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3670
      if ok_jobs:
3671
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3672

    
3673
    # first, remove any non-submitted jobs
3674
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3675
    for idx, _, jid, name in failures:
3676
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3677
      results.append((idx, False, jid))
3678

    
3679
    while self.jobs:
3680
      (idx, _, jid, name) = self._ChooseJob()
3681
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3682
      try:
3683
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3684
        success = True
3685
      except errors.JobLost, err:
3686
        _, job_result = FormatError(err)
3687
        ToStderr("Job %s%s has been archived, cannot check its result",
3688
                 jid, self._IfName(name, " for %s"))
3689
        success = False
3690
      except (errors.GenericError, luxi.ProtocolError), err:
3691
        _, job_result = FormatError(err)
3692
        success = False
3693
        # the error message will always be shown, verbose or not
3694
        ToStderr("Job %s%s has failed: %s",
3695
                 jid, self._IfName(name, " for %s"), job_result)
3696

    
3697
      results.append((idx, success, job_result))
3698

    
3699
    # sort based on the index, then drop it
3700
    results.sort()
3701
    results = [i[1:] for i in results]
3702

    
3703
    return results
3704

    
3705
  def WaitOrShow(self, wait):
3706
    """Wait for job results or only print the job IDs.
3707

3708
    @type wait: boolean
3709
    @param wait: whether to wait or not
3710

3711
    """
3712
    if wait:
3713
      return self.GetResults()
3714
    else:
3715
      if not self.jobs:
3716
        self.SubmitPending()
3717
      for _, status, result, name in self.jobs:
3718
        if status:
3719
          ToStdout("%s: %s", result, name)
3720
        else:
3721
          ToStderr("Failure for %s: %s", name, result)
3722
      return [row[1:3] for row in self.jobs]
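
# Illustrative sketch, not part of the original module: the usual
# QueueJob/GetResults pattern for JobExecutor. The opcode list is hypothetical
# and a reachable master daemon is required, so this function is never called
# here.
def _ExampleJobExecutorUsage(named_opcodes):
  je = JobExecutor(opts=None)
  for (name, op) in named_opcodes:
    je.QueueJob(name, op)
  # GetResults() submits anything still pending, polls the jobs and returns
  # (success, result) pairs in submission order
  return je.GetResults()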
3723

    
3724

    
3725
def FormatParamsDictInfo(param_dict, actual):
3726
  """Formats a parameter dictionary.
3727

3728
  @type param_dict: dict
3729
  @param param_dict: the own parameters
3730
  @type actual: dict
3731
  @param actual: the current parameter set (including defaults)
3732
  @rtype: dict
3733
  @return: dictionary where the value of each parameter is either a fully
3734
      formatted string or a dictionary containing formatted strings
3735

3736
  """
3737
  ret = {}
3738
  for (key, data) in actual.items():
3739
    if isinstance(data, dict) and data:
3740
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
3741
    else:
3742
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
3743
  return ret
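
# Illustrative sketch, not part of the original module: values present in the
# own parameters are shown as-is, inherited ones are marked as defaults. The
# parameter names are made up.
def _ExampleFormatParamsDictInfo():
  actual = {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/vda1"}
  own = {"root_path": "/dev/vda1"}
  formatted = FormatParamsDictInfo(own, actual)
  assert formatted["root_path"] == "/dev/vda1"
  assert formatted["kernel_path"] == "default (/boot/vmlinuz)"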
3744

    
3745

    
3746
def _FormatListInfoDefault(data, def_data):
3747
  if data is not None:
3748
    ret = utils.CommaJoin(data)
3749
  else:
3750
    ret = "default (%s)" % utils.CommaJoin(def_data)
3751
  return ret
3752

    
3753

    
3754
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3755
  """Formats an instance policy.
3756

3757
  @type custom_ipolicy: dict
3758
  @param custom_ipolicy: own policy
3759
  @type eff_ipolicy: dict
3760
  @param eff_ipolicy: effective policy (including defaults); ignored for
3761
      cluster
3762
  @type iscluster: bool
3763
  @param iscluster: the policy is at cluster level
3764
  @rtype: list of pairs
3765
  @return: formatted data, suitable for L{PrintGenericInfo}
3766

3767
  """
3768
  if iscluster:
3769
    eff_ipolicy = custom_ipolicy
3770

    
3771
  minmax_out = []
3772
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3773
  if custom_minmax:
3774
    for (k, minmax) in enumerate(custom_minmax):
3775
      minmax_out.append([
3776
        ("%s/%s" % (key, k),
3777
         FormatParamsDictInfo(minmax[key], minmax[key]))
3778
        for key in constants.ISPECS_MINMAX_KEYS
3779
        ])
3780
  else:
3781
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3782
      minmax_out.append([
3783
        ("%s/%s" % (key, k),
3784
         FormatParamsDictInfo({}, minmax[key]))
3785
        for key in constants.ISPECS_MINMAX_KEYS
3786
        ])
3787
  ret = [("bounds specs", minmax_out)]
3788

    
3789
  if iscluster:
3790
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
3791
    ret.append(
3792
      (constants.ISPECS_STD,
3793
       FormatParamsDictInfo(stdspecs, stdspecs))
3794
      )
3795

    
3796
  ret.append(
3797
    ("allowed disk templates",
3798
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3799
                            eff_ipolicy[constants.IPOLICY_DTS]))
3800
    )
3801
  ret.extend([
3802
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3803
    for key in constants.IPOLICY_PARAMETERS
3804
    ])
3805
  return ret
3806

    
3807

    
3808
def _PrintSpecsParameters(buf, specs):
3809
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3810
  buf.write(",".join(values))
3811

    
3812

    
3813
def PrintIPolicyCommand(buf, ipolicy, isgroup):
3814
  """Print the command option used to generate the given instance policy.
3815

3816
  Currently only the parts dealing with specs are supported.
3817

3818
  @type buf: StringIO
3819
  @param buf: stream to write into
3820
  @type ipolicy: dict
3821
  @param ipolicy: instance policy
3822
  @type isgroup: bool
3823
  @param isgroup: whether the policy is at group level
3824

3825
  """
3826
  if not isgroup:
3827
    stdspecs = ipolicy.get("std")
3828
    if stdspecs:
3829
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3830
      _PrintSpecsParameters(buf, stdspecs)
3831
  minmaxes = ipolicy.get("minmax", [])
3832
  first = True
3833
  for minmax in minmaxes:
3834
    minspecs = minmax.get("min")
3835
    maxspecs = minmax.get("max")
3836
    if minspecs and maxspecs:
3837
      if first:
3838
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3839
        first = False
3840
      else:
3841
        buf.write("//")
3842
      buf.write("min:")
3843
      _PrintSpecsParameters(buf, minspecs)
3844
      buf.write("/max:")
3845
      _PrintSpecsParameters(buf, maxspecs)
3846

    
3847

    
3848
def ConfirmOperation(names, list_type, text, extra=""):
3849
  """Ask the user to confirm an operation on a list of list_type.
3850

3851
  This function is used to request confirmation for doing an operation
3852
  on a given list of list_type.
3853

3854
  @type names: list
3855
  @param names: the list of names that we display when
3856
      we ask for confirmation
3857
  @type list_type: str
3858
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3859
  @type text: str
3860
  @param text: the operation that the user should confirm
3861
  @rtype: boolean
3862
  @return: True or False depending on user's confirmation.
3863

3864
  """
3865
  count = len(names)
3866
  msg = ("The %s will operate on %d %s.\n%s"
3867
         "Do you want to continue?" % (text, count, list_type, extra))
3868
  affected = (("\nAffected %s:\n" % list_type) +
3869
              "\n".join(["  %s" % name for name in names]))
3870

    
3871
  choices = [("y", True, "Yes, execute the %s" % text),
3872
             ("n", False, "No, abort the %s" % text)]
3873

    
3874
  if count > 20:
3875
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3876
    question = msg
3877
  else:
3878
    question = msg + affected
3879

    
3880
  choice = AskUser(question, choices)
3881
  if choice == "v":
3882
    choices.pop(1)
3883
    choice = AskUser(msg + affected, choices)
3884
  return choice
3885

    
3886

    
3887
def _MaybeParseUnit(elements):
3888
  """Parses and returns an array of potential values with units.
3889

3890
  """
3891
  parsed = {}
3892
  for k, v in elements.items():
3893
    if v == constants.VALUE_DEFAULT:
3894
      parsed[k] = v
3895
    else:
3896
      parsed[k] = utils.ParseUnit(v)
3897
  return parsed
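
# Illustrative sketch, not part of the original module: the "default" keyword
# is passed through unchanged, anything else is handed to utils.ParseUnit.
def _ExampleMaybeParseUnit():
  parsed = _MaybeParseUnit({"size": constants.VALUE_DEFAULT})
  assert parsed["size"] == constants.VALUE_DEFAULT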
3898

    
3899

    
3900
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3901
                             ispecs_disk_count, ispecs_disk_size,
3902
                             ispecs_nic_count, group_ipolicy, fill_all):
3903
  try:
3904
    if ispecs_mem_size:
3905
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3906
    if ispecs_disk_size:
3907
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3908
  except (TypeError, ValueError, errors.UnitParseError), err:
3909
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3910
                               " in policy: %s" %
3911
                               (ispecs_disk_size, ispecs_mem_size, err),
3912
                               errors.ECODE_INVAL)
3913

    
3914
  # prepare ipolicy dict
3915
  ispecs_transposed = {
3916
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3917
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3918
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3919
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3920
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3921
    }
3922

    
3923
  # first, check that the values given are correct
3924
  if group_ipolicy:
3925
    forced_type = TISPECS_GROUP_TYPES
3926
  else:
3927
    forced_type = TISPECS_CLUSTER_TYPES
3928
  for specs in ispecs_transposed.values():
3929
    assert type(specs) is dict
3930
    utils.ForceDictType(specs, forced_type)
3931

    
3932
  # then transpose
3933
  ispecs = {
3934
    constants.ISPECS_MIN: {},
3935
    constants.ISPECS_MAX: {},
3936
    constants.ISPECS_STD: {},
3937
    }
3938
  for (name, specs) in ispecs_transposed.iteritems():
3939
    assert name in constants.ISPECS_PARAMETERS
3940
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3941
      assert key in ispecs
3942
      ispecs[key][name] = val
3943
  minmax_out = {}
3944
  for key in constants.ISPECS_MINMAX_KEYS:
3945
    if fill_all:
3946
      minmax_out[key] = \
3947
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3948
    else:
3949
      minmax_out[key] = ispecs[key]
3950
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3951
  if fill_all:
3952
    ipolicy[constants.ISPECS_STD] = \
3953
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3954
                         ispecs[constants.ISPECS_STD])
3955
  else:
3956
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3957

    
3958

    
3959
def _ParseSpecUnit(spec, keyname):
3960
  ret = spec.copy()
3961
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3962
    if k in ret:
3963
      try:
3964
        ret[k] = utils.ParseUnit(ret[k])
3965
      except (TypeError, ValueError, errors.UnitParseError), err:
3966
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3967
                                    " specs: %s" % (k, ret[k], keyname, err)),
3968
                                   errors.ECODE_INVAL)
3969
  return ret
3970

    
3971

    
3972
def _ParseISpec(spec, keyname, required):
3973
  ret = _ParseSpecUnit(spec, keyname)
3974
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
3975
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
3976
  if required and missing:
3977
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
3978
                               (keyname, utils.CommaJoin(missing)),
3979
                               errors.ECODE_INVAL)
3980
  return ret
3981

    
3982

    
3983
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
3984
  ret = None
3985
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
3986
      len(minmax_ispecs[0]) == 1):
3987
    for (key, spec) in minmax_ispecs[0].items():
3988
      # This loop is executed exactly once
3989
      if key in allowed_values and not spec:
3990
        ret = key
3991
  return ret
3992

    
3993

    
3994
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
3995
                            group_ipolicy, allowed_values):
3996
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
3997
  if found_allowed is not None:
3998
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
3999
  elif minmax_ispecs is not None:
4000
    minmax_out = []
4001
    for mmpair in minmax_ispecs:
4002
      mmpair_out = {}
4003
      for (key, spec) in mmpair.items():
4004
        if key not in constants.ISPECS_MINMAX_KEYS:
4005
          msg = "Invalid key in bounds instance specifications: %s" % key
4006
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
4007
        mmpair_out[key] = _ParseISpec(spec, key, True)
4008
      minmax_out.append(mmpair_out)
4009
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4010
  if std_ispecs is not None:
4011
    assert not group_ipolicy # This is not an option for gnt-group
4012
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4013

    
4014

    
4015
def CreateIPolicyFromOpts(ispecs_mem_size=None,
4016
                          ispecs_cpu_count=None,
4017
                          ispecs_disk_count=None,
4018
                          ispecs_disk_size=None,
4019
                          ispecs_nic_count=None,
4020
                          minmax_ispecs=None,
4021
                          std_ispecs=None,
4022
                          ipolicy_disk_templates=None,
4023
                          ipolicy_vcpu_ratio=None,
4024
                          ipolicy_spindle_ratio=None,
4025
                          group_ipolicy=False,
4026
                          allowed_values=None,
4027
                          fill_all=False):
4028
  """Creation of instance policy based on command line options.
4029

4030
  @param fill_all: whether for cluster policies we should ensure that
4031
    all values are filled
4032

4033
  """
4034
  assert not (fill_all and allowed_values)
4035

    
4036
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4037
                 ispecs_disk_size or ispecs_nic_count)
4038
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4039
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4040
                               " together with any --ipolicy-xxx-specs option",
4041
                               errors.ECODE_INVAL)
4042

    
4043
  ipolicy_out = objects.MakeEmptyIPolicy()
4044
  if split_specs:
4045
    assert fill_all
4046
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4047
                             ispecs_disk_count, ispecs_disk_size,
4048
                             ispecs_nic_count, group_ipolicy, fill_all)
4049
  elif (minmax_ispecs is not None or std_ispecs is not None):
4050
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4051
                            group_ipolicy, allowed_values)
4052

    
4053
  if ipolicy_disk_templates is not None:
4054
    if allowed_values and ipolicy_disk_templates in allowed_values:
4055
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4056
    else:
4057
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4058
  if ipolicy_vcpu_ratio is not None:
4059
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4060
  if ipolicy_spindle_ratio is not None:
4061
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4062

    
4063
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4064

    
4065
  if not group_ipolicy and fill_all:
4066
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4067

    
4068
  return ipolicy_out
4069

    
4070

    
4071
def _SerializeGenericInfo(buf, data, level, afterkey=False):
4072
  """Formatting core of L{PrintGenericInfo}.
4073

4074
  @param buf: (string) stream to accumulate the result into
4075
  @param data: data to format
4076
  @type level: int
4077
  @param level: depth in the data hierarchy, used for indenting
4078
  @type afterkey: bool
4079
  @param afterkey: True when we are in the middle of a line after a key (used
4080
      to properly add newlines or indentation)
4081

4082
  """
4083
  baseind = "  "
4084
  if isinstance(data, dict):
4085
    if not data:
4086
      buf.write("\n")
4087
    else:
4088
      if afterkey:
4089
        buf.write("\n")
4090
        doindent = True
4091
      else:
4092
        doindent = False
4093
      for key in sorted(data):
4094
        if doindent:
4095
          buf.write(baseind * level)
4096
        else:
4097
          doindent = True
4098
        buf.write(key)
4099
        buf.write(": ")
4100
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4101
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4102
    # list of tuples (an ordered dictionary)
4103
    if afterkey:
4104
      buf.write("\n")
4105
      doindent = True
4106
    else:
4107
      doindent = False
4108
    for (key, val) in data:
4109
      if doindent:
4110
        buf.write(baseind * level)
4111
      else:
4112
        doindent = True
4113
      buf.write(key)
4114
      buf.write(": ")
4115
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4116
  elif isinstance(data, list):
4117
    if not data:
4118
      buf.write("\n")
4119
    else:
4120
      if afterkey:
4121
        buf.write("\n")
4122
        doindent = True
4123
      else:
4124
        doindent = False
4125
      for item in data:
4126
        if doindent:
4127
          buf.write(baseind * level)
4128
        else:
4129
          doindent = True
4130
        buf.write("-")
4131
        buf.write(baseind[1:])
4132
        _SerializeGenericInfo(buf, item, level + 1)
4133
  else:
4134
    # This branch should only be taken for strings, but it's practically
4135
    # impossible to guarantee that no other types are produced somewhere
4136
    buf.write(str(data))
4137
    buf.write("\n")
4138

    
4139

    
4140
def PrintGenericInfo(data):
4141
  """Print information formatted according to the hierarchy.
4142

4143
  The output is a valid YAML string.
4144

4145
  @param data: the data to print. It's a hierarchical structure whose elements
4146
      can be:
4147
        - dictionaries, where keys are strings and values are of any of the
4148
          types listed here
4149
        - lists of pairs (key, value), where key is a string and value is of
4150
          any of the types listed here; it's a way to encode ordered
4151
          dictionaries
4152
        - lists of any of the types listed here
4153
        - strings
4154

4155
  """
4156
  buf = StringIO()
4157
  _SerializeGenericInfo(buf, data, 0)
4158
  ToStdout(buf.getvalue().rstrip("\n"))
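
# Illustrative sketch, not part of the original module: the kinds of nested
# structures PrintGenericInfo accepts. All keys and values are made up.
def _ExamplePrintGenericInfo():
  PrintGenericInfo([
    # a list of pairs encodes an ordered dictionary
    ("Cluster name", "cluster.example.com"),
    # plain list
    ("Enabled hypervisors", ["kvm", "xen-pvm"]),
    # nested dictionary
    ("Default parameters", {"maxmem": "1024", "vcpus": "1"}),
    ])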