#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HOTPLUG_IF_POSSIBLE_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }

class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
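# Illustrative note (not part of the upstream revision): each ARGS_* constant
# above declares the positional arguments a command accepts; for example
# ARGS_ONE_NODE requires exactly one node name (min=1, max=1), while
# ARGS_MANY_NODES accepts any number of node names, including none
# (min=0, max=None).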


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
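# Illustrative example (not part of the upstream revision):
#   _SplitIdentKeyVal("--disk", "0:size=10G,mode=rw", True)
#     -> ("0", {"size": "10G", "mode": "rw"})
#   _SplitIdentKeyVal("--net", "no_link0", True)
#     -> ("link0", False)   # a "no_"-prefixed ident requests group removal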


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
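# Illustrative example (not part of the upstream revision): "//" separates
# list entries and "/" separates ident:key=val sections within one entry, so
#   check_multilist_ident_key_val(None, "--opt",
#                                 "min:size=10/max:size=20//min:size=5")
# returns [{"min": {"size": "10"}, "max": {"size": "20"}},
#          {"min": {"size": "5"}}].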


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
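# Illustrative sketch (not part of the upstream revision): the custom types
# registered in CliOption are used like any optparse type; a hypothetical
# option such as
#   EXAMPLE_KV_OPT = cli_option("--example", dest="example", type="keyval",
#                               default={}, help="key=value pairs")
# would parse "--example a=1,no_b" into {"a": "1", "b": False}.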


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))
1211

    
1212
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1213
                         type="bool", default=None,
1214
                         help=("Set the drained flag on the node"
1215
                               " (excluded from allocation operations)"))
1216

    
1217
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1218
                              type="bool", default=None, metavar=_YORNO,
1219
                              help="Set the master_capable flag on the node")
1220

    
1221
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1222
                          type="bool", default=None, metavar=_YORNO,
1223
                          help="Set the vm_capable flag on the node")
1224

    
1225
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1226
                             type="bool", default=None, metavar=_YORNO,
1227
                             help="Set the allocatable flag on a volume")
1228

    
1229
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1230
                               help="Disable support for lvm based instances"
1231
                               " (cluster-wide)",
1232
                               action="store_false", default=True)
1233

    
1234
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1235
                            dest="enabled_hypervisors",
1236
                            help="Comma-separated list of hypervisors",
1237
                            type="string", default=None)
1238

    
1239
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1240
                                        dest="enabled_disk_templates",
1241
                                        help="Comma-separated list of "
1242
                                             "disk templates",
1243
                                        type="string", default=None)
1244

    
1245
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1246
                            type="keyval", default={},
1247
                            help="NIC parameters")
1248

    
1249
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1250
                         dest="candidate_pool_size", type="int",
1251
                         help="Set the candidate pool size")
1252

    
1253
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1254
                         help=("Enables LVM and specifies the volume group"
1255
                               " name (cluster-wide) for disk allocation"
1256
                               " [%s]" % constants.DEFAULT_VG),
1257
                         metavar="VG", default=None)
1258

    
1259
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1260
                          help="Destroy cluster", action="store_true")
1261

    
1262
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1263
                          help="Skip node agreement check (dangerous)",
1264
                          action="store_true", default=False)
1265

    
1266
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1267
                            help="Specify the mac prefix for the instance IP"
1268
                            " addresses, in the format XX:XX:XX",
1269
                            metavar="PREFIX",
1270
                            default=None)
1271

    
1272
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1273
                               help="Specify the node interface (cluster-wide)"
1274
                               " on which the master IP address will be added"
1275
                               " (cluster init default: %s)" %
1276
                               constants.DEFAULT_BRIDGE,
1277
                               metavar="NETDEV",
1278
                               default=None)
1279

    
1280
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1281
                                help="Specify the netmask of the master IP",
1282
                                metavar="NETMASK",
1283
                                default=None)
1284

    
1285
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1286
                                     dest="use_external_mip_script",
1287
                                     help="Specify whether to run a"
1288
                                     " user-provided script for the master"
1289
                                     " IP address turnup and"
1290
                                     " turndown operations",
1291
                                     type="bool", metavar=_YORNO, default=None)
1292

    
1293
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1294
                                help="Specify the default directory (cluster-"
1295
                                "wide) for storing the file-based disks [%s]" %
1296
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1297
                                metavar="DIR",
1298
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1299

    
1300
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1301
  "--shared-file-storage-dir",
1302
  dest="shared_file_storage_dir",
1303
  help="Specify the default directory (cluster-wide) for storing the"
1304
  " shared file-based disks [%s]" %
1305
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1306
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1307

    
1308
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1309
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1310
                                   action="store_false", default=True)
1311

    
1312
MODIFY_ETCHOSTS_OPT = \
1313
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
1314
            default=None, type="bool",
1315
            help="Defines whether the cluster should autonomously modify"
1316
            " and keep in sync the /etc/hosts file of the nodes")
1317

    
1318
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1319
                                    help="Don't initialize SSH keys",
1320
                                    action="store_false", default=True)
1321

    
1322
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1323
                             help="Enable parseable error messages",
1324
                             action="store_true", default=False)
1325

    
1326
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1327
                          help="Skip N+1 memory redundancy tests",
1328
                          action="store_true", default=False)
1329

    
1330
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1331
                             help="Type of reboot: soft/hard/full",
1332
                             default=constants.INSTANCE_REBOOT_HARD,
1333
                             metavar="<REBOOT>",
1334
                             choices=list(constants.REBOOT_TYPES))
1335

    
1336
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1337
                                    dest="ignore_secondaries",
1338
                                    default=False, action="store_true",
1339
                                    help="Ignore errors from secondaries")
1340

    
1341
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1342
                            action="store_false", default=True,
1343
                            help="Don't shutdown the instance (unsafe)")
1344

    
1345
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1346
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1347
                         help="Maximum time to wait")
1348

    
1349
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1350
                                  dest="shutdown_timeout", type="int",
1351
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1352
                                  help="Maximum time to wait for instance"
1353
                                  " shutdown")
1354

    
1355
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1356
                          default=None,
1357
                          help=("Number of seconds between repetions of the"
1358
                                " command"))
1359

    
1360
EARLY_RELEASE_OPT = cli_option("--early-release",
1361
                               dest="early_release", default=False,
1362
                               action="store_true",
1363
                               help="Release the locks on the secondary"
1364
                               " node(s) early")
1365

    
1366
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1367
                                  dest="new_cluster_cert",
1368
                                  default=False, action="store_true",
1369
                                  help="Generate a new cluster certificate")
1370

    
1371
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1372
                           default=None,
1373
                           help="File containing new RAPI certificate")
1374

    
1375
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1376
                               default=None, action="store_true",
1377
                               help=("Generate a new self-signed RAPI"
1378
                                     " certificate"))
1379

    
1380
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1381
                            default=None,
1382
                            help="File containing new SPICE certificate")
1383

    
1384
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1385
                              default=None,
1386
                              help="File containing the certificate of the CA"
1387
                              " which signed the SPICE certificate")
1388

    
1389
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1390
                                dest="new_spice_cert", default=None,
1391
                                action="store_true",
1392
                                help=("Generate a new self-signed SPICE"
1393
                                      " certificate"))
1394

    
1395
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1396
                                    dest="new_confd_hmac_key",
1397
                                    default=False, action="store_true",
1398
                                    help=("Create a new HMAC key for %s" %
1399
                                          constants.CONFD))
1400

    
1401
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1402
                                       dest="cluster_domain_secret",
1403
                                       default=None,
1404
                                       help=("Load new new cluster domain"
1405
                                             " secret from file"))
1406

    
1407
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1408
                                           dest="new_cluster_domain_secret",
1409
                                           default=False, action="store_true",
1410
                                           help=("Create a new cluster domain"
1411
                                                 " secret"))
1412

    
1413
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1414
                              dest="use_replication_network",
1415
                              help="Whether to use the replication network"
1416
                              " for talking to the nodes",
1417
                              action="store_true", default=False)
1418

    
1419
MAINTAIN_NODE_HEALTH_OPT = \
1420
    cli_option("--maintain-node-health", dest="maintain_node_health",
1421
               metavar=_YORNO, default=None, type="bool",
1422
               help="Configure the cluster to automatically maintain node"
1423
               " health, by shutting down unknown instances, shutting down"
1424
               " unknown DRBD devices, etc.")
1425

    
1426
IDENTIFY_DEFAULTS_OPT = \
1427
    cli_option("--identify-defaults", dest="identify_defaults",
1428
               default=False, action="store_true",
1429
               help="Identify which saved instance parameters are equal to"
1430
               " the current cluster defaults and set them as such, instead"
1431
               " of marking them as overridden")
1432

    
1433
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1434
                         action="store", dest="uid_pool",
1435
                         help=("A list of user-ids or user-id"
1436
                               " ranges separated by commas"))
1437

    
1438
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1439
                          action="store", dest="add_uids",
1440
                          help=("A list of user-ids or user-id"
1441
                                " ranges separated by commas, to be"
1442
                                " added to the user-id pool"))
1443

    
1444
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1445
                             action="store", dest="remove_uids",
1446
                             help=("A list of user-ids or user-id"
1447
                                   " ranges separated by commas, to be"
1448
                                   " removed from the user-id pool"))
1449

    
1450
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1451
                              action="store", dest="reserved_lvs",
1452
                              help=("A comma-separated list of reserved"
1453
                                    " logical volumes names, that will be"
1454
                                    " ignored by cluster verify"))
1455

    
1456
ROMAN_OPT = cli_option("--roman",
1457
                       dest="roman_integers", default=False,
1458
                       action="store_true",
1459
                       help="Use roman numbers for positive integers")
1460

    
1461
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1462
                             action="store", default=None,
1463
                             help="Specifies usermode helper for DRBD")
1464

    
1465
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1466
                                action="store_false", default=True,
1467
                                help="Disable support for DRBD")
1468

    
1469
PRIMARY_IP_VERSION_OPT = \
1470
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1471
               action="store", dest="primary_ip_version",
1472
               metavar="%d|%d" % (constants.IP4_VERSION,
1473
                                  constants.IP6_VERSION),
1474
               help="Cluster-wide IP version for primary IP")
1475

    
1476
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1477
                              action="store_true",
1478
                              help="Show machine name for every line in output")
1479

    
1480
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1481
                              action="store_true",
1482
                              help=("Hide successful results and show failures"
1483
                                    " only (determined by the exit code)"))
1484

    
1485
REASON_OPT = cli_option("--reason", default=None,
1486
                        help="The reason for executing the command")
1487

    
1488

    
1489
def _PriorityOptionCb(option, _, value, parser):
1490
  """Callback for processing C{--priority} option.
1491

1492
  """
1493
  value = _PRIONAME_TO_VALUE[value]
1494

    
1495
  setattr(parser.values, option.dest, value)
1496

    
1497

    
1498
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1499
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1500
                          choices=_PRIONAME_TO_VALUE.keys(),
1501
                          action="callback", type="choice",
1502
                          callback=_PriorityOptionCb,
1503
                          help="Priority for opcode processing")
1504

    
1505
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1506
                        type="bool", default=None, metavar=_YORNO,
1507
                        help="Sets the hidden flag on the OS")
1508

    
1509
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1510
                        type="bool", default=None, metavar=_YORNO,
1511
                        help="Sets the blacklisted flag on the OS")
1512

    
1513
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1514
                                     type="bool", metavar=_YORNO,
1515
                                     dest="prealloc_wipe_disks",
1516
                                     help=("Wipe disks prior to instance"
1517
                                           " creation"))
1518

    
1519
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1520
                             type="keyval", default=None,
1521
                             help="Node parameters")
1522

    
1523
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1524
                              action="store", metavar="POLICY", default=None,
1525
                              help="Allocation policy for the node group")
1526

    
1527
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1528
                              type="bool", metavar=_YORNO,
1529
                              dest="node_powered",
1530
                              help="Specify if the SoR for node is powered")
1531

    
1532
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1533
                             default=constants.OOB_TIMEOUT,
1534
                             help="Maximum time to wait for out-of-band helper")
1535

    
1536
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1537
                             default=constants.OOB_POWER_DELAY,
1538
                             help="Time in seconds to wait between power-ons")
1539

    
1540
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1541
                              action="store_true", default=False,
1542
                              help=("Whether command argument should be treated"
1543
                                    " as filter"))
1544

    
1545
NO_REMEMBER_OPT = cli_option("--no-remember",
1546
                             dest="no_remember",
1547
                             action="store_true", default=False,
1548
                             help="Perform but do not record the change"
1549
                             " in the configuration")
1550

    
1551
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1552
                              default=False, action="store_true",
1553
                              help="Evacuate primary instances only")
1554

    
1555
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1556
                                default=False, action="store_true",
1557
                                help="Evacuate secondary instances only"
1558
                                     " (applies only to internally mirrored"
1559
                                     " disk templates, e.g. %s)" %
1560
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1561

    
1562
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1563
                                action="store_true", default=False,
1564
                                help="Pause instance at startup")
1565

    
1566
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1567
                          help="Destination node group (name or uuid)",
1568
                          default=None, action="append",
1569
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1570

    
1571
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1572
                               action="append", dest="ignore_errors",
1573
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1574
                               help="Error code to be ignored")
1575

    
1576
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1577
                            action="append",
1578
                            help=("Specify disk state information in the"
1579
                                  " format"
1580
                                  " storage_type/identifier:option=value,...;"
1581
                                  " note this is unused for now"),
1582
                            type="identkeyval")
1583

    
1584
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1585
                          action="append",
1586
                          help=("Specify hypervisor state information in the"
1587
                                " format hypervisor:option=value,...;"
1588
                                " note this is unused for now"),
1589
                          type="identkeyval")
1590

    
1591
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1592
                                action="store_true", default=False,
1593
                                help="Ignore instance policy violations")
1594

    
1595
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1596
                             help="Sets the instance's runtime memory,"
1597
                             " ballooning it up or down to the new value",
1598
                             default=None, type="unit", metavar="<size>")
1599

    
1600
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1601
                          action="store_true", default=False,
1602
                          help="Marks the grow as absolute instead of the"
1603
                          " (default) relative mode")
1604

    
1605
NETWORK_OPT = cli_option("--network",
1606
                         action="store", default=None, dest="network",
1607
                         help="IP network in CIDR notation")
1608

    
1609
GATEWAY_OPT = cli_option("--gateway",
1610
                         action="store", default=None, dest="gateway",
1611
                         help="IP address of the router (gateway)")
1612

    
1613
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1614
                                  action="store", default=None,
1615
                                  dest="add_reserved_ips",
1616
                                  help="Comma-separated list of"
1617
                                  " reserved IPs to add")
1618

    
1619
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1620
                                     action="store", default=None,
1621
                                     dest="remove_reserved_ips",
1622
                                     help="Comma-delimited list of"
1623
                                     " reserved IPs to remove")
1624

    
1625
NETWORK6_OPT = cli_option("--network6",
1626
                          action="store", default=None, dest="network6",
1627
                          help="IP network in CIDR notation")
1628

    
1629
GATEWAY6_OPT = cli_option("--gateway6",
1630
                          action="store", default=None, dest="gateway6",
1631
                          help="IP6 address of the router (gateway)")
1632

    
1633
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1634
                                  dest="conflicts_check",
1635
                                  default=True,
1636
                                  action="store_false",
1637
                                  help="Don't check for conflicting IPs")
1638

    
1639
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1640
                                 default=False, action="store_true",
1641
                                 help="Include default values")
1642

    
1643
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1644
                         action="store_true", default=False,
1645
                         help="Hotplug supported devices (NICs and Disks)")
1646

    
1647
HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
1648
                                     dest="hotplug_if_possible",
1649
                                     action="store_true", default=False,
1650
                                     help="Hotplug devices in case"
1651
                                          " hotplug is supported")
1652

    
1653
#: Options provided by all commands
1654
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1655

    
1656
# Common options for creating instances; "add" and "import" then add their own
1657
# specific ones.
1658
COMMON_CREATE_OPTS = [
1659
  BACKEND_OPT,
1660
  DISK_OPT,
1661
  DISK_TEMPLATE_OPT,
1662
  FILESTORE_DIR_OPT,
1663
  FILESTORE_DRIVER_OPT,
1664
  HYPERVISOR_OPT,
1665
  IALLOCATOR_OPT,
1666
  NET_OPT,
1667
  NODE_PLACEMENT_OPT,
1668
  NOIPCHECK_OPT,
1669
  NOCONFLICTSCHECK_OPT,
1670
  NONAMECHECK_OPT,
1671
  NONICS_OPT,
1672
  NWSYNC_OPT,
1673
  OSPARAMS_OPT,
1674
  OS_SIZE_OPT,
1675
  SUBMIT_OPT,
1676
  TAG_ADD_OPT,
1677
  DRY_RUN_OPT,
1678
  PRIORITY_OPT,
1679
  ]
1680

    
1681
# common instance policy options
1682
INSTANCE_POLICY_OPTS = [
1683
  IPOLICY_BOUNDS_SPECS_OPT,
1684
  IPOLICY_DISK_TEMPLATES,
1685
  IPOLICY_VCPU_RATIO,
1686
  IPOLICY_SPINDLE_RATIO,
1687
  ]
1688

    
1689
# instance policy split specs options
1690
SPLIT_ISPECS_OPTS = [
1691
  SPECS_CPU_COUNT_OPT,
1692
  SPECS_DISK_COUNT_OPT,
1693
  SPECS_DISK_SIZE_OPT,
1694
  SPECS_MEM_SIZE_OPT,
1695
  SPECS_NIC_COUNT_OPT,
1696
  ]
1697

    
1698

    
1699
class _ShowUsage(Exception):
1700
  """Exception class for L{_ParseArgs}.
1701

1702
  """
1703
  def __init__(self, exit_error):
1704
    """Initializes instances of this class.
1705

1706
    @type exit_error: bool
1707
    @param exit_error: Whether to report failure on exit
1708

1709
    """
1710
    Exception.__init__(self)
1711
    self.exit_error = exit_error
1712

    
1713

    
1714
class _ShowVersion(Exception):
1715
  """Exception class for L{_ParseArgs}.
1716

1717
  """
1718

    
1719

    
1720
def _ParseArgs(binary, argv, commands, aliases, env_override):
1721
  """Parser for the command line arguments.
1722

1723
  This function parses the arguments and returns the function which
1724
  must be executed together with its (modified) arguments.
1725

1726
  @param binary: Script name
1727
  @param argv: Command line arguments
1728
  @param commands: Dictionary containing command definitions
1729
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1730
  @param env_override: list of env variables allowed for default args
1731
  @raise _ShowUsage: If usage description should be shown
1732
  @raise _ShowVersion: If version should be shown
1733

1734
  """
1735
  assert not (env_override - set(commands))
1736
  assert not (set(aliases.keys()) & set(commands.keys()))
1737

    
1738
  if len(argv) > 1:
1739
    cmd = argv[1]
1740
  else:
1741
    # No option or command given
1742
    raise _ShowUsage(exit_error=True)
1743

    
1744
  if cmd == "--version":
1745
    raise _ShowVersion()
1746
  elif cmd == "--help":
1747
    raise _ShowUsage(exit_error=False)
1748
  elif not (cmd in commands or cmd in aliases):
1749
    raise _ShowUsage(exit_error=True)
1750

    
1751
  # get command, unalias it, and look it up in commands
1752
  if cmd in aliases:
1753
    if aliases[cmd] not in commands:
1754
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1755
                                   " command '%s'" % (cmd, aliases[cmd]))
1756

    
1757
    cmd = aliases[cmd]
1758

    
1759
  if cmd in env_override:
1760
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1761
    env_args = os.environ.get(args_env_name)
1762
    if env_args:
1763
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1764

    
1765
  func, args_def, parser_opts, usage, description = commands[cmd]
1766
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1767
                        description=description,
1768
                        formatter=TitledHelpFormatter(),
1769
                        usage="%%prog %s %s" % (cmd, usage))
1770
  parser.disable_interspersed_args()
1771
  options, args = parser.parse_args(args=argv[2:])
1772

    
1773
  if not _CheckArguments(cmd, args_def, args):
1774
    return None, None, None
1775

    
1776
  return func, options, args
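# Illustrative example of the env_override mechanism above (binary and command
# names are chosen only for illustration): for a binary "gnt-instance" and the
# command "list", default arguments are taken from the GNT_INSTANCE_LIST
# environment variable and inserted right after the command word, e.g.
#   GNT_INSTANCE_LIST="--no-headers" gnt-instance list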
1777

    
1778

    
1779
def _FormatUsage(binary, commands):
1780
  """Generates a nice description of all commands.
1781

1782
  @param binary: Script name
1783
  @param commands: Dictionary containing command definitions
1784

1785
  """
1786
  # compute the maximum command name length (capped at 60) for alignment
1787
  mlen = min(60, max(map(len, commands)))
1788

    
1789
  yield "Usage: %s {command} [options...] [argument...]" % binary
1790
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1791
  yield ""
1792
  yield "Commands:"
1793

    
1794
  # and format a nice command list
1795
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1796
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1797
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1798
    for line in help_lines:
1799
      yield " %-*s   %s" % (mlen, "", line)
1800

    
1801
  yield ""
1802

    
1803

    
1804
def _CheckArguments(cmd, args_def, args):
1805
  """Verifies the arguments using the argument definition.
1806

1807
  Algorithm:
1808

1809
    1. Abort with error if values specified by user but none expected.
1810

1811
    1. For each argument in definition
1812

1813
      1. Keep running count of minimum number of values (min_count)
1814
      1. Keep running count of maximum number of values (max_count)
1815
      1. If it has an unlimited number of values
1816

1817
        1. Abort with error if it's not the last argument in the definition
1818

1819
    1. If last argument has limited number of values
1820

1821
      1. Abort with error if number of values doesn't match or is too large
1822

1823
    1. Abort with error if user didn't pass enough values (min_count)
1824

1825
  """
1826
  if args and not args_def:
1827
    ToStderr("Error: Command %s expects no arguments", cmd)
1828
    return False
1829

    
1830
  min_count = None
1831
  max_count = None
1832
  check_max = None
1833

    
1834
  last_idx = len(args_def) - 1
1835

    
1836
  for idx, arg in enumerate(args_def):
1837
    if min_count is None:
1838
      min_count = arg.min
1839
    elif arg.min is not None:
1840
      min_count += arg.min
1841

    
1842
    if max_count is None:
1843
      max_count = arg.max
1844
    elif arg.max is not None:
1845
      max_count += arg.max
1846

    
1847
    if idx == last_idx:
1848
      check_max = (arg.max is not None)
1849

    
1850
    elif arg.max is None:
1851
      raise errors.ProgrammerError("Only the last argument can have max=None")
1852

    
1853
  if check_max:
1854
    # Command with exact number of arguments
1855
    if (min_count is not None and max_count is not None and
1856
        min_count == max_count and len(args) != min_count):
1857
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1858
      return False
1859

    
1860
    # Command with limited number of arguments
1861
    if max_count is not None and len(args) > max_count:
1862
      ToStderr("Error: Command %s expects only %d argument(s)",
1863
               cmd, max_count)
1864
      return False
1865

    
1866
  # Command with some required arguments
1867
  if min_count is not None and len(args) < min_count:
1868
    ToStderr("Error: Command %s expects at least %d argument(s)",
1869
             cmd, min_count)
1870
    return False
1871

    
1872
  return True
1873

    
1874

    
1875
def SplitNodeOption(value):
1876
  """Splits the value of a --node option.
1877

1878
  """
1879
  if value and ":" in value:
1880
    return value.split(":", 1)
1881
  else:
1882
    return (value, None)
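# Illustrative examples for SplitNodeOption (node names are hypothetical);
# note that the two branches return a list and a tuple respectively:
#   SplitNodeOption("node1.example.com:node2.example.com")
#     => ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")
#     => ("node1.example.com", None)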
1883

    
1884

    
1885
def CalculateOSNames(os_name, os_variants):
1886
  """Calculates all the names an OS can be called, according to its variants.
1887

1888
  @type os_name: string
1889
  @param os_name: base name of the os
1890
  @type os_variants: list or None
1891
  @param os_variants: list of supported variants
1892
  @rtype: list
1893
  @return: list of valid names
1894

1895
  """
1896
  if os_variants:
1897
    return ["%s+%s" % (os_name, v) for v in os_variants]
1898
  else:
1899
    return [os_name]
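# Illustrative examples for CalculateOSNames (OS and variant names are
# hypothetical):
#   CalculateOSNames("debootstrap", ["default", "minimal"])
#     => ["debootstrap+default", "debootstrap+minimal"]
#   CalculateOSNames("debootstrap", None)
#     => ["debootstrap"]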
1900

    
1901

    
1902
def ParseFields(selected, default):
1903
  """Parses the values of "--field"-like options.
1904

1905
  @type selected: string or None
1906
  @param selected: User-selected options
1907
  @type default: list
1908
  @param default: Default fields
1909

1910
  """
1911
  if selected is None:
1912
    return default
1913

    
1914
  if selected.startswith("+"):
1915
    return default + selected[1:].split(",")
1916

    
1917
  return selected.split(",")
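# Illustrative examples for ParseFields (field names are hypothetical):
#   ParseFields(None, ["name", "status"])   => ["name", "status"]
#   ParseFields("+oper_ram", ["name"])      => ["name", "oper_ram"]
#   ParseFields("name,pnode", ["name"])     => ["name", "pnode"]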
1918

    
1919

    
1920
UsesRPC = rpc.RunWithRPC
1921

    
1922

    
1923
def AskUser(text, choices=None):
1924
  """Ask the user a question.
1925

1926
  @param text: the question to ask
1927

1928
  @param choices: list with elements tuples (input_char, return_value,
1929
      description); if not given, it will default to: [('y', True,
1930
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
1931
      note that the '?' char is reserved for help
1932

1933
  @return: one of the return values from the choices list; if input is
1934
      not possible (i.e. not running with a tty), we return the last
1935
      entry from the list
1936

1937
  """
1938
  if choices is None:
1939
    choices = [("y", True, "Perform the operation"),
1940
               ("n", False, "Do not perform the operation")]
1941
  if not choices or not isinstance(choices, list):
1942
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1943
  for entry in choices:
1944
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1945
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1946

    
1947
  answer = choices[-1][1]
1948
  new_text = []
1949
  for line in text.splitlines():
1950
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1951
  text = "\n".join(new_text)
1952
  try:
1953
    f = file("/dev/tty", "a+")
1954
  except IOError:
1955
    return answer
1956
  try:
1957
    chars = [entry[0] for entry in choices]
1958
    chars[-1] = "[%s]" % chars[-1]
1959
    chars.append("?")
1960
    maps = dict([(entry[0], entry[1]) for entry in choices])
1961
    while True:
1962
      f.write(text)
1963
      f.write("\n")
1964
      f.write("/".join(chars))
1965
      f.write(": ")
1966
      line = f.readline(2).strip().lower()
1967
      if line in maps:
1968
        answer = maps[line]
1969
        break
1970
      elif line == "?":
1971
        for entry in choices:
1972
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1973
        f.write("\n")
1974
        continue
1975
  finally:
1976
    f.close()
1977
  return answer
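# Illustrative usage sketch for AskUser (the prompt and the extra "a" choice
# are made up for this example):
#   ans = AskUser("Continue with the operation?",
#                 [("y", True, "Perform the operation"),
#                  ("n", False, "Skip this step"),
#                  ("a", "all", "Perform it for all remaining items")])
# Without a controlling tty the value of the last entry ("all" here) is
# returned, as documented above.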
1978

    
1979

    
1980
class JobSubmittedException(Exception):
1981
  """Job was submitted, client should exit.
1982

1983
  This exception has one argument, the ID of the job that was
1984
  submitted. The handler should print this ID.
1985

1986
  This is not an error, just a structured way to exit from clients.
1987

1988
  """
1989

    
1990

    
1991
def SendJob(ops, cl=None):
1992
  """Function to submit an opcode without waiting for the results.
1993

1994
  @type ops: list
1995
  @param ops: list of opcodes
1996
  @type cl: luxi.Client
1997
  @param cl: the luxi client to use for communicating with the master;
1998
             if None, a new client will be created
1999

2000
  """
2001
  if cl is None:
2002
    cl = GetClient()
2003

    
2004
  job_id = cl.SubmitJob(ops)
2005

    
2006
  return job_id
2007

    
2008

    
2009
def GenericPollJob(job_id, cbs, report_cbs):
2010
  """Generic job-polling function.
2011

2012
  @type job_id: number
2013
  @param job_id: Job ID
2014
  @type cbs: Instance of L{JobPollCbBase}
2015
  @param cbs: Data callbacks
2016
  @type report_cbs: Instance of L{JobPollReportCbBase}
2017
  @param report_cbs: Reporting callbacks
2018

2019
  """
2020
  prev_job_info = None
2021
  prev_logmsg_serial = None
2022

    
2023
  status = None
2024

    
2025
  while True:
2026
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2027
                                      prev_logmsg_serial)
2028
    if not result:
2029
      # job not found, go away!
2030
      raise errors.JobLost("Job with id %s lost" % job_id)
2031

    
2032
    if result == constants.JOB_NOTCHANGED:
2033
      report_cbs.ReportNotChanged(job_id, status)
2034

    
2035
      # Wait again
2036
      continue
2037

    
2038
    # Split result, a tuple of (field values, log entries)
2039
    (job_info, log_entries) = result
2040
    (status, ) = job_info
2041

    
2042
    if log_entries:
2043
      for log_entry in log_entries:
2044
        (serial, timestamp, log_type, message) = log_entry
2045
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2046
                                    log_type, message)
2047
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2048

    
2049
    # TODO: Handle canceled and archived jobs
2050
    elif status in (constants.JOB_STATUS_SUCCESS,
2051
                    constants.JOB_STATUS_ERROR,
2052
                    constants.JOB_STATUS_CANCELING,
2053
                    constants.JOB_STATUS_CANCELED):
2054
      break
2055

    
2056
    prev_job_info = job_info
2057

    
2058
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2059
  if not jobs:
2060
    raise errors.JobLost("Job with id %s lost" % job_id)
2061

    
2062
  status, opstatus, result = jobs[0]
2063

    
2064
  if status == constants.JOB_STATUS_SUCCESS:
2065
    return result
2066

    
2067
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2068
    raise errors.OpExecError("Job was canceled")
2069

    
2070
  has_ok = False
2071
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2072
    if status == constants.OP_STATUS_SUCCESS:
2073
      has_ok = True
2074
    elif status == constants.OP_STATUS_ERROR:
2075
      errors.MaybeRaise(msg)
2076

    
2077
      if has_ok:
2078
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2079
                                 (idx, msg))
2080

    
2081
      raise errors.OpExecError(str(msg))
2082

    
2083
  # default failure mode
2084
  raise errors.OpExecError(result)
2085

    
2086

    
2087
class JobPollCbBase:
2088
  """Base class for L{GenericPollJob} callbacks.
2089

2090
  """
2091
  def __init__(self):
2092
    """Initializes this class.
2093

2094
    """
2095

    
2096
  def WaitForJobChangeOnce(self, job_id, fields,
2097
                           prev_job_info, prev_log_serial):
2098
    """Waits for changes on a job.
2099

2100
    """
2101
    raise NotImplementedError()
2102

    
2103
  def QueryJobs(self, job_ids, fields):
2104
    """Returns the selected fields for the selected job IDs.
2105

2106
    @type job_ids: list of numbers
2107
    @param job_ids: Job IDs
2108
    @type fields: list of strings
2109
    @param fields: Fields
2110

2111
    """
2112
    raise NotImplementedError()
2113

    
2114

    
2115
class JobPollReportCbBase:
2116
  """Base class for L{GenericPollJob} reporting callbacks.
2117

2118
  """
2119
  def __init__(self):
2120
    """Initializes this class.
2121

2122
    """
2123

    
2124
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2125
    """Handles a log message.
2126

2127
    """
2128
    raise NotImplementedError()
2129

    
2130
  def ReportNotChanged(self, job_id, status):
2131
    """Called for if a job hasn't changed in a while.
2132

2133
    @type job_id: number
2134
    @param job_id: Job ID
2135
    @type status: string or None
2136
    @param status: Job status if available
2137

2138
    """
2139
    raise NotImplementedError()
2140

    
2141

    
2142
class _LuxiJobPollCb(JobPollCbBase):
2143
  def __init__(self, cl):
2144
    """Initializes this class.
2145

2146
    """
2147
    JobPollCbBase.__init__(self)
2148
    self.cl = cl
2149

    
2150
  def WaitForJobChangeOnce(self, job_id, fields,
2151
                           prev_job_info, prev_log_serial):
2152
    """Waits for changes on a job.
2153

2154
    """
2155
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2156
                                        prev_job_info, prev_log_serial)
2157

    
2158
  def QueryJobs(self, job_ids, fields):
2159
    """Returns the selected fields for the selected job IDs.
2160

2161
    """
2162
    return self.cl.QueryJobs(job_ids, fields)
2163

    
2164

    
2165
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2166
  def __init__(self, feedback_fn):
2167
    """Initializes this class.
2168

2169
    """
2170
    JobPollReportCbBase.__init__(self)
2171

    
2172
    self.feedback_fn = feedback_fn
2173

    
2174
    assert callable(feedback_fn)
2175

    
2176
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2177
    """Handles a log message.
2178

2179
    """
2180
    self.feedback_fn((timestamp, log_type, log_msg))
2181

    
2182
  def ReportNotChanged(self, job_id, status):
2183
    """Called if a job hasn't changed in a while.
2184

2185
    """
2186
    # Ignore
2187

    
2188

    
2189
class StdioJobPollReportCb(JobPollReportCbBase):
2190
  def __init__(self):
2191
    """Initializes this class.
2192

2193
    """
2194
    JobPollReportCbBase.__init__(self)
2195

    
2196
    self.notified_queued = False
2197
    self.notified_waitlock = False
2198

    
2199
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2200
    """Handles a log message.
2201

2202
    """
2203
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2204
             FormatLogMessage(log_type, log_msg))
2205

    
2206
  def ReportNotChanged(self, job_id, status):
2207
    """Called if a job hasn't changed in a while.
2208

2209
    """
2210
    if status is None:
2211
      return
2212

    
2213
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2214
      ToStderr("Job %s is waiting in queue", job_id)
2215
      self.notified_queued = True
2216

    
2217
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2218
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2219
      self.notified_waitlock = True
2220

    
2221

    
2222
def FormatLogMessage(log_type, log_msg):
2223
  """Formats a job message according to its type.
2224

2225
  """
2226
  if log_type != constants.ELOG_MESSAGE:
2227
    log_msg = str(log_msg)
2228

    
2229
  return utils.SafeEncode(log_msg)
2230

    
2231

    
2232
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2233
  """Function to poll for the result of a job.
2234

2235
  @type job_id: job identifier
2236
  @param job_id: the job to poll for results
2237
  @type cl: luxi.Client
2238
  @param cl: the luxi client to use for communicating with the master;
2239
             if None, a new client will be created
2240

2241
  """
2242
  if cl is None:
2243
    cl = GetClient()
2244

    
2245
  if reporter is None:
2246
    if feedback_fn:
2247
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2248
    else:
2249
      reporter = StdioJobPollReportCb()
2250
  elif feedback_fn:
2251
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2252

    
2253
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
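# Illustrative sketch of the submit-then-poll pattern that SubmitOpCode below
# implements ("op" stands for an already constructed opcode instance):
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)
#   first_result = results[0]
# On success PollJob returns the list of per-opcode results of the job.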
2254

    
2255

    
2256
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2257
  """Legacy function to submit an opcode.
2258

2259
  This is just a simple wrapper over the construction of the processor
2260
  instance. It should be extended to better handle feedback and
2261
  interaction functions.
2262

2263
  """
2264
  if cl is None:
2265
    cl = GetClient()
2266

    
2267
  SetGenericOpcodeOpts([op], opts)
2268

    
2269
  job_id = SendJob([op], cl=cl)
2270

    
2271
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2272
                       reporter=reporter)
2273

    
2274
  return op_results[0]
2275

    
2276

    
2277
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2278
  """Wrapper around SubmitOpCode or SendJob.
2279

2280
  This function will decide, based on the 'opts' parameter, whether to
2281
  submit and wait for the result of the opcode (and return it), or
2282
  whether to just send the job and print its identifier. It is used in
2283
  order to simplify the implementation of the '--submit' option.
2284

2285
  It will also process the opcodes if we're sending them via SendJob
2286
  (otherwise SubmitOpCode does it).
2287

2288
  """
2289
  if opts and opts.submit_only:
2290
    job = [op]
2291
    SetGenericOpcodeOpts(job, opts)
2292
    job_id = SendJob(job, cl=cl)
2293
    raise JobSubmittedException(job_id)
2294
  else:
2295
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2296

    
2297

    
2298
def _InitReasonTrail(op, opts):
2299
  """Builds the first part of the reason trail
2300

2301
  Builds the initial part of the reason trail, adding the user provided reason
2302
  (if it exists) and the name of the command starting the operation.
2303

2304
  @param op: the opcode the reason trail will be added to
2305
  @param opts: the command line options selected by the user
2306

2307
  """
2308
  assert len(sys.argv) >= 2
2309
  trail = []
2310

    
2311
  if opts.reason:
2312
    trail.append((constants.OPCODE_REASON_SRC_USER,
2313
                  opts.reason,
2314
                  utils.EpochNano()))
2315

    
2316
  binary = os.path.basename(sys.argv[0])
2317
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2318
  command = sys.argv[1]
2319
  trail.append((source, command, utils.EpochNano()))
2320
  op.reason = trail
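# Illustrative shape of the resulting reason trail (reason text, command and
# timestamps are hypothetical):
#   [(constants.OPCODE_REASON_SRC_USER, "scheduled maintenance", <ns timestamp>),
#    (constants.OPCODE_REASON_SRC_CLIENT + ":gnt-instance", "reboot",
#     <ns timestamp>)]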
2321

    
2322

    
2323
def SetGenericOpcodeOpts(opcode_list, options):
2324
  """Processor for generic options.
2325

2326
  This function updates the given opcodes based on generic command
2327
  line options (like debug, dry-run, etc.).
2328

2329
  @param opcode_list: list of opcodes
2330
  @param options: command line options or None
2331
  @return: None (in-place modification)
2332

2333
  """
2334
  if not options:
2335
    return
2336
  for op in opcode_list:
2337
    op.debug_level = options.debug
2338
    if hasattr(options, "dry_run"):
2339
      op.dry_run = options.dry_run
2340
    if getattr(options, "priority", None) is not None:
2341
      op.priority = options.priority
2342
    _InitReasonTrail(op, options)
2343

    
2344

    
2345
def GetClient(query=False):
2346
  """Connects to the a luxi socket and returns a client.
2347

2348
  @type query: boolean
2349
  @param query: this signifies that the client will only be
2350
      used for queries; if the build-time parameter
2351
      enable-split-queries is enabled, then the client will be
2352
      connected to the query socket instead of the masterd socket
2353

2354
  """
2355
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2356
  if override_socket:
2357
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2358
      address = pathutils.MASTER_SOCKET
2359
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2360
      address = pathutils.QUERY_SOCKET
2361
    else:
2362
      address = override_socket
2363
  elif query and constants.ENABLE_SPLIT_QUERY:
2364
    address = pathutils.QUERY_SOCKET
2365
  else:
2366
    address = None
2367
  # TODO: Cache object?
2368
  try:
2369
    client = luxi.Client(address=address)
2370
  except luxi.NoMasterError:
2371
    ss = ssconf.SimpleStore()
2372

    
2373
    # Try to read ssconf file
2374
    try:
2375
      ss.GetMasterNode()
2376
    except errors.ConfigurationError:
2377
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2378
                                 " not part of a cluster",
2379
                                 errors.ECODE_INVAL)
2380

    
2381
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2382
    if master != myself:
2383
      raise errors.OpPrereqError("This is not the master node, please connect"
2384
                                 " to node '%s' and rerun the command" %
2385
                                 master, errors.ECODE_INVAL)
2386
    raise
2387
  return client
2388

    
2389

    
2390
def FormatError(err):
2391
  """Return a formatted error message for a given error.
2392

2393
  This function takes an exception instance and returns a tuple
2394
  consisting of two values: first, the recommended exit code, and
2395
  second, a string describing the error message (not
2396
  newline-terminated).
2397

2398
  """
2399
  retcode = 1
2400
  obuf = StringIO()
2401
  msg = str(err)
2402
  if isinstance(err, errors.ConfigurationError):
2403
    txt = "Corrupt configuration file: %s" % msg
2404
    logging.error(txt)
2405
    obuf.write(txt + "\n")
2406
    obuf.write("Aborting.")
2407
    retcode = 2
2408
  elif isinstance(err, errors.HooksAbort):
2409
    obuf.write("Failure: hooks execution failed:\n")
2410
    for node, script, out in err.args[0]:
2411
      if out:
2412
        obuf.write("  node: %s, script: %s, output: %s\n" %
2413
                   (node, script, out))
2414
      else:
2415
        obuf.write("  node: %s, script: %s (no output)\n" %
2416
                   (node, script))
2417
  elif isinstance(err, errors.HooksFailure):
2418
    obuf.write("Failure: hooks general failure: %s" % msg)
2419
  elif isinstance(err, errors.ResolverError):
2420
    this_host = netutils.Hostname.GetSysName()
2421
    if err.args[0] == this_host:
2422
      msg = "Failure: can't resolve my own hostname ('%s')"
2423
    else:
2424
      msg = "Failure: can't resolve hostname '%s'"
2425
    obuf.write(msg % err.args[0])
2426
  elif isinstance(err, errors.OpPrereqError):
2427
    if len(err.args) == 2:
2428
      obuf.write("Failure: prerequisites not met for this"
2429
                 " operation:\nerror type: %s, error details:\n%s" %
2430
                 (err.args[1], err.args[0]))
2431
    else:
2432
      obuf.write("Failure: prerequisites not met for this"
2433
                 " operation:\n%s" % msg)
2434
  elif isinstance(err, errors.OpExecError):
2435
    obuf.write("Failure: command execution error:\n%s" % msg)
2436
  elif isinstance(err, errors.TagError):
2437
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2438
  elif isinstance(err, errors.JobQueueDrainError):
2439
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2440
               " accept new requests\n")
2441
  elif isinstance(err, errors.JobQueueFull):
2442
    obuf.write("Failure: the job queue is full and doesn't accept new"
2443
               " job submissions until old jobs are archived\n")
2444
  elif isinstance(err, errors.TypeEnforcementError):
2445
    obuf.write("Parameter Error: %s" % msg)
2446
  elif isinstance(err, errors.ParameterError):
2447
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2448
  elif isinstance(err, luxi.NoMasterError):
2449
    if err.args[0] == pathutils.MASTER_SOCKET:
2450
      daemon = "the master daemon"
2451
    elif err.args[0] == pathutils.QUERY_SOCKET:
2452
      daemon = "the config daemon"
2453
    else:
2454
      daemon = "socket '%s'" % str(err.args[0])
2455
    obuf.write("Cannot communicate with %s.\nIs the process running"
2456
               " and listening for connections?" % daemon)
2457
  elif isinstance(err, luxi.TimeoutError):
2458
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2459
               " been submitted and will continue to run even if the call"
2460
               " timed out. Useful commands in this situation are \"gnt-job"
2461
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2462
    obuf.write(msg)
2463
  elif isinstance(err, luxi.PermissionError):
2464
    obuf.write("It seems you don't have permissions to connect to the"
2465
               " master daemon.\nPlease retry as a different user.")
2466
  elif isinstance(err, luxi.ProtocolError):
2467
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2468
               "%s" % msg)
2469
  elif isinstance(err, errors.JobLost):
2470
    obuf.write("Error checking job status: %s" % msg)
2471
  elif isinstance(err, errors.QueryFilterParseError):
2472
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2473
    obuf.write("\n".join(err.GetDetails()))
2474
  elif isinstance(err, errors.GenericError):
2475
    obuf.write("Unhandled Ganeti error: %s" % msg)
2476
  elif isinstance(err, JobSubmittedException):
2477
    obuf.write("JobID: %s\n" % err.args[0])
2478
    retcode = 0
2479
  else:
2480
    obuf.write("Unhandled exception: %s" % msg)
2481
  return retcode, obuf.getvalue().rstrip("\n")
2482

    
2483

    
2484
def GenericMain(commands, override=None, aliases=None,
2485
                env_override=frozenset()):
2486
  """Generic main function for all the gnt-* commands.
2487

2488
  @param commands: a dictionary with a special structure, see the design doc
2489
                   for command line handling.
2490
  @param override: if not None, we expect a dictionary with keys that will
2491
                   override command line options; this can be used to pass
2492
                   options from the scripts to generic functions
2493
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2494
  @param env_override: list of environment names which are allowed to submit
2495
                       default args for commands
2496

2497
  """
2498
  # save the program name and the entire command line for later logging
2499
  if sys.argv:
2500
    binary = os.path.basename(sys.argv[0])
2501
    if not binary:
2502
      binary = sys.argv[0]
2503

    
2504
    if len(sys.argv) >= 2:
2505
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2506
    else:
2507
      logname = binary
2508

    
2509
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2510
  else:
2511
    binary = "<unknown program>"
2512
    cmdline = "<unknown>"
2513

    
2514
  if aliases is None:
2515
    aliases = {}
2516

    
2517
  try:
2518
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2519
                                       env_override)
2520
  except _ShowVersion:
2521
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2522
             constants.RELEASE_VERSION)
2523
    return constants.EXIT_SUCCESS
2524
  except _ShowUsage, err:
2525
    for line in _FormatUsage(binary, commands):
2526
      ToStdout(line)
2527

    
2528
    if err.exit_error:
2529
      return constants.EXIT_FAILURE
2530
    else:
2531
      return constants.EXIT_SUCCESS
2532
  except errors.ParameterError, err:
2533
    result, err_msg = FormatError(err)
2534
    ToStderr(err_msg)
2535
    return 1
2536

    
2537
  if func is None: # parse error
2538
    return 1
2539

    
2540
  if override is not None:
2541
    for key, val in override.iteritems():
2542
      setattr(options, key, val)
2543

    
2544
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2545
                     stderr_logging=True)
2546

    
2547
  logging.info("Command line: %s", cmdline)
2548

    
2549
  try:
2550
    result = func(options, args)
2551
  except (errors.GenericError, luxi.ProtocolError,
2552
          JobSubmittedException), err:
2553
    result, err_msg = FormatError(err)
2554
    logging.exception("Error during command processing")
2555
    ToStderr(err_msg)
2556
  except KeyboardInterrupt:
2557
    result = constants.EXIT_FAILURE
2558
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2559
             " might have been submitted and"
2560
             " will continue to run in the background.")
2561
  except IOError, err:
2562
    if err.errno == errno.EPIPE:
2563
      # our terminal went away, we'll exit
2564
      sys.exit(constants.EXIT_FAILURE)
2565
    else:
2566
      raise
2567

    
2568
  return result
2569

    
2570

    
2571
def ParseNicOption(optvalue):
2572
  """Parses the value of the --net option(s).
2573

2574
  """
2575
  try:
2576
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2577
  except (TypeError, ValueError), err:
2578
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2579
                               errors.ECODE_INVAL)
2580

    
2581
  nics = [{}] * nic_max
2582
  for nidx, ndict in optvalue:
2583
    nidx = int(nidx)
2584

    
2585
    if not isinstance(ndict, dict):
2586
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2587
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2588

    
2589
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2590

    
2591
    nics[nidx] = ndict
2592

    
2593
  return nics
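# Illustrative example for ParseNicOption (the address is hypothetical and
# this assumes "ip" is one of the keys accepted by constants.INIC_PARAMS_TYPES):
#   ParseNicOption([("0", {"ip": "192.0.2.10"}), ("2", {})])
#     => [{"ip": "192.0.2.10"}, {}, {}]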
2594

    
2595

    
2596
def GenericInstanceCreate(mode, opts, args):
2597
  """Add an instance to the cluster via either creation or import.
2598

2599
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2600
  @param opts: the command line options selected by the user
2601
  @type args: list
2602
  @param args: should contain only one element, the new instance name
2603
  @rtype: int
2604
  @return: the desired exit code
2605

2606
  """
2607
  instance = args[0]
2608

    
2609
  (pnode, snode) = SplitNodeOption(opts.node)
2610

    
2611
  hypervisor = None
2612
  hvparams = {}
2613
  if opts.hypervisor:
2614
    hypervisor, hvparams = opts.hypervisor
2615

    
2616
  if opts.nics:
2617
    nics = ParseNicOption(opts.nics)
2618
  elif opts.no_nics:
2619
    # no nics
2620
    nics = []
2621
  elif mode == constants.INSTANCE_CREATE:
2622
    # default of one nic, all auto
2623
    nics = [{}]
2624
  else:
2625
    # mode == import
2626
    nics = []
2627

    
2628
  if opts.disk_template == constants.DT_DISKLESS:
2629
    if opts.disks or opts.sd_size is not None:
2630
      raise errors.OpPrereqError("Diskless instance but disk"
2631
                                 " information passed", errors.ECODE_INVAL)
2632
    disks = []
2633
  else:
2634
    if (not opts.disks and not opts.sd_size
2635
        and mode == constants.INSTANCE_CREATE):
2636
      raise errors.OpPrereqError("No disk information specified",
2637
                                 errors.ECODE_INVAL)
2638
    if opts.disks and opts.sd_size is not None:
2639
      raise errors.OpPrereqError("Please use either the '--disk' or"
2640
                                 " '-s' option", errors.ECODE_INVAL)
2641
    if opts.sd_size is not None:
2642
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2643

    
2644
    if opts.disks:
2645
      try:
2646
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2647
      except ValueError, err:
2648
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2649
                                   errors.ECODE_INVAL)
2650
      disks = [{}] * disk_max
2651
    else:
2652
      disks = []
2653
    for didx, ddict in opts.disks:
2654
      didx = int(didx)
2655
      if not isinstance(ddict, dict):
2656
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2657
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2658
      elif constants.IDISK_SIZE in ddict:
2659
        if constants.IDISK_ADOPT in ddict:
2660
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2661
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2662
        try:
2663
          ddict[constants.IDISK_SIZE] = \
2664
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2665
        except ValueError, err:
2666
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2667
                                     (didx, err), errors.ECODE_INVAL)
2668
      elif constants.IDISK_ADOPT in ddict:
2669
        if mode == constants.INSTANCE_IMPORT:
2670
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2671
                                     " import", errors.ECODE_INVAL)
2672
        ddict[constants.IDISK_SIZE] = 0
2673
      else:
2674
        raise errors.OpPrereqError("Missing size or adoption source for"
2675
                                   " disk %d" % didx, errors.ECODE_INVAL)
2676
      disks[didx] = ddict
2677

    
2678
  if opts.tags is not None:
2679
    tags = opts.tags.split(",")
2680
  else:
2681
    tags = []
2682

    
2683
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2684
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2685

    
2686
  if mode == constants.INSTANCE_CREATE:
2687
    start = opts.start
2688
    os_type = opts.os
2689
    force_variant = opts.force_variant
2690
    src_node = None
2691
    src_path = None
2692
    no_install = opts.no_install
2693
    identify_defaults = False
2694
  elif mode == constants.INSTANCE_IMPORT:
2695
    start = False
2696
    os_type = None
2697
    force_variant = False
2698
    src_node = opts.src_node
2699
    src_path = opts.src_dir
2700
    no_install = None
2701
    identify_defaults = opts.identify_defaults
2702
  else:
2703
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2704

    
2705
  op = opcodes.OpInstanceCreate(instance_name=instance,
2706
                                disks=disks,
2707
                                disk_template=opts.disk_template,
2708
                                nics=nics,
2709
                                conflicts_check=opts.conflicts_check,
2710
                                pnode=pnode, snode=snode,
2711
                                ip_check=opts.ip_check,
2712
                                name_check=opts.name_check,
2713
                                wait_for_sync=opts.wait_for_sync,
2714
                                file_storage_dir=opts.file_storage_dir,
2715
                                file_driver=opts.file_driver,
2716
                                iallocator=opts.iallocator,
2717
                                hypervisor=hypervisor,
2718
                                hvparams=hvparams,
2719
                                beparams=opts.beparams,
2720
                                osparams=opts.osparams,
2721
                                mode=mode,
2722
                                start=start,
2723
                                os_type=os_type,
2724
                                force_variant=force_variant,
2725
                                src_node=src_node,
2726
                                src_path=src_path,
2727
                                tags=tags,
2728
                                no_install=no_install,
2729
                                identify_defaults=identify_defaults,
2730
                                ignore_ipolicy=opts.ignore_ipolicy)
2731

    
2732
  SubmitOrSend(op, opts)
2733
  return 0
2734

    
2735

    
2736
class _RunWhileClusterStoppedHelper:
2737
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2738

2739
  """
2740
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2741
    """Initializes this class.
2742

2743
    @type feedback_fn: callable
2744
    @param feedback_fn: Feedback function
2745
    @type cluster_name: string
2746
    @param cluster_name: Cluster name
2747
    @type master_node: string
2748
    @param master_node: Master node name
2749
    @type online_nodes: list
2750
    @param online_nodes: List of names of online nodes
2751

2752
    """
2753
    self.feedback_fn = feedback_fn
2754
    self.cluster_name = cluster_name
2755
    self.master_node = master_node
2756
    self.online_nodes = online_nodes
2757

    
2758
    self.ssh = ssh.SshRunner(self.cluster_name)
2759

    
2760
    self.nonmaster_nodes = [name for name in online_nodes
2761
                            if name != master_node]
2762

    
2763
    assert self.master_node not in self.nonmaster_nodes
2764

    
2765
  def _RunCmd(self, node_name, cmd):
2766
    """Runs a command on the local or a remote machine.
2767

2768
    @type node_name: string
2769
    @param node_name: Machine name
2770
    @type cmd: list
2771
    @param cmd: Command
2772

2773
    """
2774
    if node_name is None or node_name == self.master_node:
2775
      # No need to use SSH
2776
      result = utils.RunCmd(cmd)
2777
    else:
2778
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2779
                            utils.ShellQuoteArgs(cmd))
2780

    
2781
    if result.failed:
2782
      errmsg = ["Failed to run command %s" % result.cmd]
2783
      if node_name:
2784
        errmsg.append("on node %s" % node_name)
2785
      errmsg.append(": exitcode %s and error %s" %
2786
                    (result.exit_code, result.output))
2787
      raise errors.OpExecError(" ".join(errmsg))
2788

    
2789
  def Call(self, fn, *args):
2790
    """Call function while all daemons are stopped.
2791

2792
    @type fn: callable
2793
    @param fn: Function to be called
2794

2795
    """
2796
    # Pause watcher by acquiring an exclusive lock on watcher state file
2797
    self.feedback_fn("Blocking watcher")
2798
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2799
    try:
2800
      # TODO: Currently, this just blocks. There's no timeout.
2801
      # TODO: Should it be a shared lock?
2802
      watcher_block.Exclusive(blocking=True)
2803

    
2804
      # Stop master daemons, so that no new jobs can come in and all running
2805
      # ones are finished
2806
      self.feedback_fn("Stopping master daemons")
2807
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2808
      try:
2809
        # Stop daemons on all nodes
2810
        for node_name in self.online_nodes:
2811
          self.feedback_fn("Stopping daemons on %s" % node_name)
2812
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2813

    
2814
        # All daemons are shut down now
2815
        try:
2816
          return fn(self, *args)
2817
        except Exception, err:
2818
          _, errmsg = FormatError(err)
2819
          logging.exception("Caught exception")
2820
          self.feedback_fn(errmsg)
2821
          raise
2822
      finally:
2823
        # Start cluster again, master node last
2824
        for node_name in self.nonmaster_nodes + [self.master_node]:
2825
          self.feedback_fn("Starting daemons on %s" % node_name)
2826
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2827
    finally:
2828
      # Resume watcher
2829
      watcher_block.Close()
2830

    
2831

    
2832
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


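# Illustrative usage sketch (the callback and paths below are hypothetical,
# not part of this module): the callback receives the helper instance as its
# first argument, so it can run commands on nodes while all daemons are down:
#
#   def _BackupConfig(helper, target_dir):
#     helper._RunCmd(None, ["cp", "-a", "/var/lib/ganeti", target_dir])
#
#   RunWhileClusterStopped(ToStdout, _BackupConfig, "/tmp/backup")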
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


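# Illustrative call (a sketch with made-up values, not part of the original
# code): building a two-column table with a right-aligned numeric field and
# the "smart" width computation (separator=None):
#
#   GenerateTable({"name": "Node", "pinst_cnt": "Pinst"},
#                 ["name", "pinst_cnt"], None,
#                 [["node1.example.com", 2], ["node2.example.com", 11]],
#                 numfields=["pinst_cnt"])
#
# returns a list of ready-to-print lines, one per header/data row.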
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


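# For illustration (not part of the original code):
#   _GetColFormatString(10, True) yields "%10s" (right-aligned) and
#   _GetColFormatString(10, False) yields "%-10s" (left-aligned).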
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


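# Illustrative call (a sketch with made-up values, not part of the module):
#
#   cols = [TableColumn("Name", str, False),
#           TableColumn("Size", str, True)]
#   FormatTable([["disk/0", 128], ["disk/1", 1024]], cols, True, None)
#
# returns the header plus one line per row, with the "Size" column
# right-aligned to its widest value.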
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


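# For example (illustrative): FormatTimestamp((1356998400, 0)) formats the
# (seconds, microseconds) pair via utils.FormatTime, while anything that is
# not a 2-element tuple/list, e.g. FormatTimestamp(None), yields "?".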
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


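# Illustrative results (not part of the original code):
#   ParseTimespec("30") returns 30 (plain seconds),
#   ParseTimespec("30m") returns 1800, and
#   ParseTimespec("2d") returns 172800;
# an empty value or a bare suffix such as "h" raises OpPrereqError.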
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


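# Typical use (illustrative sketch): collecting the secondary IPs of all
# online nodes except the master, e.g. before moving data over the
# replication network:
#
#   GetOnlineNodes([], cl=cl, secondary_ips=True, filter_master=True)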
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


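# Typical usage sketch (hypothetical opcodes, not part of the original code):
#
#   jex = JobExecutor(cl=cl, opts=opts)
#   for instance_name, op in work:    # op being an opcode for that instance
#     jex.QueueJob(instance_name, op)
#   results = jex.GetResults()        # list of (success, result) tuples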
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


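# For illustration (values are made up):
#   FormatParamsDictInfo({"maxmem": 512}, {"maxmem": 512, "vcpus": 4})
# returns {"maxmem": "512", "vcpus": "default (4)"}, i.e. parameters not set
# explicitly are shown with their effective default.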
def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def _PrintSpecsParameters(buf, specs):
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
  buf.write(",".join(values))


def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)


def ConfirmOperation(names, list_type, text, extra=""):
3839
  """Ask the user to confirm an operation on a list of list_type.
3840

3841
  This function is used to request confirmation for doing an operation
3842
  on a given list of list_type.
3843

3844
  @type names: list
3845
  @param names: the list of names that we display when
3846
      we ask for confirmation
3847
  @type list_type: str
3848
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3849
  @type text: str
3850
  @param text: the operation that the user should confirm
3851
  @rtype: boolean
3852
  @return: True or False depending on user's confirmation.
3853

3854
  """
3855
  count = len(names)
3856
  msg = ("The %s will operate on %d %s.\n%s"
3857
         "Do you want to continue?" % (text, count, list_type, extra))
3858
  affected = (("\nAffected %s:\n" % list_type) +
3859
              "\n".join(["  %s" % name for name in names]))
3860

    
3861
  choices = [("y", True, "Yes, execute the %s" % text),
3862
             ("n", False, "No, abort the %s" % text)]
3863

    
3864
  if count > 20:
3865
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3866
    question = msg
3867
  else:
3868
    question = msg + affected
3869

    
3870
  choice = AskUser(question, choices)
3871
  if choice == "v":
3872
    choices.pop(1)
3873
    choice = AskUser(msg + affected, choices)
3874
  return choice
3875

    
3876

    
3877
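# Illustrative call (hypothetical names, not part of the original code):
#
#   ConfirmOperation(["node1", "node2"], "nodes", "shutdown")
#
# prints the question together with the affected node names (shown inline
# for up to 20 items) and returns True or False depending on the answer.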
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


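# For illustration (a sketch; exact numbers depend on utils.ParseUnit, which
# is assumed here to normalize sizes to mebibytes):
#   _MaybeParseUnit({"min": "1G", "max": "default"})
# would return {"min": 1024, "max": "default"}, leaving the "default"
# placeholder untouched.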
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, required):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret


def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  ret = None
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
      len(minmax_ispecs[0]) == 1):
    for (key, spec) in minmax_ispecs[0].items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
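# A small illustrative call (values are made up):
#
#   PrintGenericInfo([
#     ("name", "instance1.example.com"),
#     ("tags", ["web", "production"]),
#     ("nic/0", [("ip", "192.0.2.10"), ("mode", "bridged")]),
#     ])
#
# writes an indented, YAML-compatible rendering of the nested structure
# to stdout.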