#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "COMPRESS_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
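
# Illustrative sketch (hypothetical values): what _ExtractTagsObject returns
# for the different tag kinds. Note that it pops the object name off "args".
#
#   opts.tag_type = constants.TAG_CLUSTER
#   _ExtractTagsObject(opts, [])      # -> (constants.TAG_CLUSTER, "")
#
#   opts.tag_type = constants.TAG_NODE
#   args = ["node1.example.com", "extra"]
#   _ExtractTagsObject(opts, args)    # -> (constants.TAG_NODE,
#                                     #     "node1.example.com")
#   # args is now ["extra"]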


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  supported tag objects (cluster, node group, node, network, instance).
  The opts argument is expected to contain a tag_type field denoting
  what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  supported tag objects (cluster, node group, node, network, instance).
  The opts argument is expected to contain a tag_type field denoting
  what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  supported tag objects (cluster, node group, node, network, instance).
  The opts argument is expected to contain a tag_type field denoting
  what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
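
# Illustrative sketch (hypothetical values): the "unit" type converts sizes
# into MiB via utils.ParseUnit, so suffixed values are normalised.
#
#   check_unit(None, "--os-size", "512")  # -> 512   (plain number, MiB)
#   check_unit(None, "--os-size", "4G")   # -> 4096  (gibibytes to MiB)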


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
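
# Illustrative sketch (hypothetical values): how the prefix rules of
# _SplitKeyVal map an option value onto a dict.
#
#   _SplitKeyVal("-H", "acpi=true,no_pae,-kernel_path", True)
#   # -> {"acpi": "true", "pae": False, "kernel_path": None}
#
#   _SplitKeyVal("-H", "acpi", False)
#   # -> errors.ParameterError, since prefix parsing is disabled and the
#   #    element carries no "=value" part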


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
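
# Illustrative sketch (hypothetical values): parsing of "ident:key=val" style
# values as used by options like -H/--hypervisor-parameters below.
#
#   _SplitIdentKeyVal("-H", "kvm:acpi=true,no_pae", True)
#   # -> ("kvm", {"acpi": "true", "pae": False})
#
#   _SplitIdentKeyVal("-D", "no_drbd", True)
#   # -> ("drbd", False), i.e. "remove this parameter group"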


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)

    
662

    
663
def check_key_val(option, opt, value):  # pylint: disable=W0613
664
  """Custom parser class for key=val,key=val options.
665

666
  This will store the parsed values as a dict {key: val}.
667

668
  """
669
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
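
# Illustrative sketch (hypothetical values): the "multilistidentkeyval" type
# used by --ipolicy-bounds-specs splits on "//" into groups and on "/" into
# ident sections within each group.
#
#   check_multilist_ident_key_val(None, "--ipolicy-bounds-specs",
#                                 "min:memory-size=128/max:memory-size=512"
#                                 "//min:memory-size=256")
#   # -> [{"min": {"memory-size": "128"}, "max": {"memory-size": "512"}},
#   #     {"min": {"memory-size": "256"}}]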


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
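
# Illustrative sketch (hypothetical values): accepted spellings for "bool"
# typed options such as --offline or --master-candidate.
#
#   check_bool(None, "--offline", "yes")    # -> True
#   check_bool(None, "--offline", "FALSE")  # -> False (input is lower-cased)
#   check_bool(None, "--offline", "maybe")  # -> errors.ParameterError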


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)
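
# Illustrative sketch (hypothetical values): the "list" type, including the
# empty-string corner case noted above.
#
#   check_list(None, "--ipolicy-disk-templates", "")  # -> []
#   check_list(None, "--ipolicy-disk-templates", "drbd,plain")
#   # -> ["drbd", "plain"]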


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
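
# Illustrative sketch (hypothetical values): "maybefloat" keeps the literal
# "default" keyword and converts everything else to a float.
#
#   check_maybefloat(None, "--ipolicy-vcpu-ratio", "default")  # -> "default"
#   check_maybefloat(None, "--ipolicy-vcpu-ratio", "2.5")      # -> 2.5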


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
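
# Illustrative sketch (hypothetical option name and values): a client script
# could register one of the custom types above through cli_option and let
# optparse run the matching TYPE_CHECKER on the raw string.
#
#   parser = OptionParser(option_class=CliOption)
#   parser.add_option(cli_option("--my-params", dest="my_params",
#                                type="keyval", default={}))
#   (opts, _) = parser.parse_args(["--my-params", "mem=128,no_acpi"])
#   # opts.my_params == {"mem": "128", "acpi": False}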


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job ID as the"
                                   " first line on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances-to-"
                                         "spindles ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance"
                            " MAC addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=None)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=None)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

COMPRESS_OPT = cli_option("--compress", dest="compress",
                          default=constants.IEC_NONE,
                          help="The compression mode to use",
                          choices=list(constants.IEC_ALL))

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

1406
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1407
                                       dest="cluster_domain_secret",
1408
                                       default=None,
1409
                                       help=("Load new new cluster domain"
1410
                                             " secret from file"))
1411

    
1412
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1413
                                           dest="new_cluster_domain_secret",
1414
                                           default=False, action="store_true",
1415
                                           help=("Create a new cluster domain"
1416
                                                 " secret"))
1417

    
1418
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1419
                              dest="use_replication_network",
1420
                              help="Whether to use the replication network"
1421
                              " for talking to the nodes",
1422
                              action="store_true", default=False)
1423

    
1424
MAINTAIN_NODE_HEALTH_OPT = \
1425
    cli_option("--maintain-node-health", dest="maintain_node_health",
1426
               metavar=_YORNO, default=None, type="bool",
1427
               help="Configure the cluster to automatically maintain node"
1428
               " health, by shutting down unknown instances, shutting down"
1429
               " unknown DRBD devices, etc.")
1430

    
1431
IDENTIFY_DEFAULTS_OPT = \
1432
    cli_option("--identify-defaults", dest="identify_defaults",
1433
               default=False, action="store_true",
1434
               help="Identify which saved instance parameters are equal to"
1435
               " the current cluster defaults and set them as such, instead"
1436
               " of marking them as overridden")
1437

    
1438
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1439
                         action="store", dest="uid_pool",
1440
                         help=("A list of user-ids or user-id"
1441
                               " ranges separated by commas"))
1442

    
1443
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1444
                          action="store", dest="add_uids",
1445
                          help=("A list of user-ids or user-id"
1446
                                " ranges separated by commas, to be"
1447
                                " added to the user-id pool"))
1448

    
1449
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1450
                             action="store", dest="remove_uids",
1451
                             help=("A list of user-ids or user-id"
1452
                                   " ranges separated by commas, to be"
1453
                                   " removed from the user-id pool"))
1454

    
1455
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1456
                              action="store", dest="reserved_lvs",
1457
                              help=("A comma-separated list of reserved"
1458
                                    " logical volumes names, that will be"
1459
                                    " ignored by cluster verify"))
1460

    
1461
ROMAN_OPT = cli_option("--roman",
1462
                       dest="roman_integers", default=False,
1463
                       action="store_true",
1464
                       help="Use roman numbers for positive integers")
1465

    
1466
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1467
                             action="store", default=None,
1468
                             help="Specifies usermode helper for DRBD")
1469

    
1470
PRIMARY_IP_VERSION_OPT = \
1471
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1472
               action="store", dest="primary_ip_version",
1473
               metavar="%d|%d" % (constants.IP4_VERSION,
1474
                                  constants.IP6_VERSION),
1475
               help="Cluster-wide IP version for primary IP")
1476

    
1477
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1478
                              action="store_true",
1479
                              help="Show machine name for every line in output")
1480

    
1481
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1482
                              action="store_true",
1483
                              help=("Hide successful results and show failures"
1484
                                    " only (determined by the exit code)"))
1485

    
1486
REASON_OPT = cli_option("--reason", default=None,
1487
                        help="The reason for executing the command")
1488

    
1489

    
1490
def _PriorityOptionCb(option, _, value, parser):
1491
  """Callback for processing C{--priority} option.
1492

1493
  """
1494
  value = _PRIONAME_TO_VALUE[value]
1495

    
1496
  setattr(parser.values, option.dest, value)
1497

    
1498

    
1499
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1500
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1501
                          choices=_PRIONAME_TO_VALUE.keys(),
1502
                          action="callback", type="choice",
1503
                          callback=_PriorityOptionCb,
1504
                          help="Priority for opcode processing")
1505

    
1506
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1507
                        type="bool", default=None, metavar=_YORNO,
1508
                        help="Sets the hidden flag on the OS")
1509

    
1510
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1511
                        type="bool", default=None, metavar=_YORNO,
1512
                        help="Sets the blacklisted flag on the OS")
1513

    
1514
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1515
                                     type="bool", metavar=_YORNO,
1516
                                     dest="prealloc_wipe_disks",
1517
                                     help=("Wipe disks prior to instance"
1518
                                           " creation"))
1519

    
1520
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1521
                             type="keyval", default=None,
1522
                             help="Node parameters")
1523

    
1524
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1525
                              action="store", metavar="POLICY", default=None,
1526
                              help="Allocation policy for the node group")
1527

    
1528
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1529
                              type="bool", metavar=_YORNO,
1530
                              dest="node_powered",
1531
                              help="Specify if the SoR for node is powered")
1532

    
1533
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1534
                             default=constants.OOB_TIMEOUT,
1535
                             help="Maximum time to wait for out-of-band helper")
1536

    
1537
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1538
                             default=constants.OOB_POWER_DELAY,
1539
                             help="Time in seconds to wait between power-ons")
1540

    
1541
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1542
                              action="store_true", default=False,
1543
                              help=("Whether command argument should be treated"
1544
                                    " as filter"))
1545

    
1546
NO_REMEMBER_OPT = cli_option("--no-remember",
1547
                             dest="no_remember",
1548
                             action="store_true", default=False,
1549
                             help="Perform but do not record the change"
1550
                             " in the configuration")
1551

    
1552
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1553
                              default=False, action="store_true",
1554
                              help="Evacuate primary instances only")
1555

    
1556
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1557
                                default=False, action="store_true",
1558
                                help="Evacuate secondary instances only"
1559
                                     " (applies only to internally mirrored"
1560
                                     " disk templates, e.g. %s)" %
1561
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1562

    
1563
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1564
                                action="store_true", default=False,
1565
                                help="Pause instance at startup")
1566

    
1567
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1568
                          help="Destination node group (name or uuid)",
1569
                          default=None, action="append",
1570
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1571

    
1572
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1573
                               action="append", dest="ignore_errors",
1574
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1575
                               help="Error code to be ignored")
1576

    
1577
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1578
                            action="append",
1579
                            help=("Specify disk state information in the"
1580
                                  " format"
1581
                                  " storage_type/identifier:option=value,...;"
1582
                                  " note this is unused for now"),
1583
                            type="identkeyval")
1584

    
1585
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1586
                          action="append",
1587
                          help=("Specify hypervisor state information in the"
1588
                                " format hypervisor:option=value,...;"
1589
                                " note this is unused for now"),
1590
                          type="identkeyval")
1591

    
1592
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1593
                                action="store_true", default=False,
1594
                                help="Ignore instance policy violations")
1595

    
1596
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1597
                             help="Sets the instance's runtime memory,"
1598
                             " ballooning it up or down to the new value",
1599
                             default=None, type="unit", metavar="<size>")
1600

    
1601
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1602
                          action="store_true", default=False,
1603
                          help="Marks the grow as absolute instead of the"
1604
                          " (default) relative mode")
1605

    
1606
NETWORK_OPT = cli_option("--network",
1607
                         action="store", default=None, dest="network",
1608
                         help="IP network in CIDR notation")
1609

    
1610
GATEWAY_OPT = cli_option("--gateway",
1611
                         action="store", default=None, dest="gateway",
1612
                         help="IP address of the router (gateway)")
1613

    
1614
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1615
                                  action="store", default=None,
1616
                                  dest="add_reserved_ips",
1617
                                  help="Comma-separated list of"
1618
                                  " reserved IPs to add")
1619

    
1620
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1621
                                     action="store", default=None,
1622
                                     dest="remove_reserved_ips",
1623
                                     help="Comma-delimited list of"
1624
                                     " reserved IPs to remove")
1625

    
1626
NETWORK6_OPT = cli_option("--network6",
1627
                          action="store", default=None, dest="network6",
1628
                          help="IP network in CIDR notation")
1629

    
1630
GATEWAY6_OPT = cli_option("--gateway6",
1631
                          action="store", default=None, dest="gateway6",
1632
                          help="IP6 address of the router (gateway)")
1633

    
1634
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1635
                                  dest="conflicts_check",
1636
                                  default=True,
1637
                                  action="store_false",
1638
                                  help="Don't check for conflicting IPs")
1639

    
1640
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1641
                                 default=False, action="store_true",
1642
                                 help="Include default values")
1643

    
1644
#: Options provided by all commands
1645
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1646

    
1647
# options related to asynchronous job handling
1648

    
1649
SUBMIT_OPTS = [
1650
  SUBMIT_OPT,
1651
  PRINT_JOBID_OPT,
1652
  ]
1653

    
1654
# common options for creating instances. add and import then add their own
1655
# specific ones.
1656
COMMON_CREATE_OPTS = [
1657
  BACKEND_OPT,
1658
  DISK_OPT,
1659
  DISK_TEMPLATE_OPT,
1660
  FILESTORE_DIR_OPT,
1661
  FILESTORE_DRIVER_OPT,
1662
  HYPERVISOR_OPT,
1663
  IALLOCATOR_OPT,
1664
  NET_OPT,
1665
  NODE_PLACEMENT_OPT,
1666
  NOIPCHECK_OPT,
1667
  NOCONFLICTSCHECK_OPT,
1668
  NONAMECHECK_OPT,
1669
  NONICS_OPT,
1670
  NWSYNC_OPT,
1671
  OSPARAMS_OPT,
1672
  OS_SIZE_OPT,
1673
  SUBMIT_OPT,
1674
  PRINT_JOBID_OPT,
1675
  TAG_ADD_OPT,
1676
  DRY_RUN_OPT,
1677
  PRIORITY_OPT,
1678
  ]
1679

    
1680
# common instance policy options
1681
INSTANCE_POLICY_OPTS = [
1682
  IPOLICY_BOUNDS_SPECS_OPT,
1683
  IPOLICY_DISK_TEMPLATES,
1684
  IPOLICY_VCPU_RATIO,
1685
  IPOLICY_SPINDLE_RATIO,
1686
  ]
1687

    
1688
# instance policy split specs options
1689
SPLIT_ISPECS_OPTS = [
1690
  SPECS_CPU_COUNT_OPT,
1691
  SPECS_DISK_COUNT_OPT,
1692
  SPECS_DISK_SIZE_OPT,
1693
  SPECS_MEM_SIZE_OPT,
1694
  SPECS_NIC_COUNT_OPT,
1695
  ]
1696

    
1697

    
1698
class _ShowUsage(Exception):
1699
  """Exception class for L{_ParseArgs}.
1700

1701
  """
1702
  def __init__(self, exit_error):
1703
    """Initializes instances of this class.
1704

1705
    @type exit_error: bool
1706
    @param exit_error: Whether to report failure on exit
1707

1708
    """
1709
    Exception.__init__(self)
1710
    self.exit_error = exit_error
1711

    
1712

    
1713
class _ShowVersion(Exception):
1714
  """Exception class for L{_ParseArgs}.
1715

1716
  """
1717

    
1718

    
1719
def _ParseArgs(binary, argv, commands, aliases, env_override):
1720
  """Parser for the command line arguments.
1721

1722
  This function parses the arguments and returns the function which
1723
  must be executed together with its (modified) arguments.
1724

1725
  @param binary: Script name
1726
  @param argv: Command line arguments
1727
  @param commands: Dictionary containing command definitions
1728
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1729
  @param env_override: list of env variables allowed for default args
1730
  @raise _ShowUsage: If usage description should be shown
1731
  @raise _ShowVersion: If version should be shown
1732

1733
  """
1734
  assert not (env_override - set(commands))
1735
  assert not (set(aliases.keys()) & set(commands.keys()))
1736

    
1737
  if len(argv) > 1:
1738
    cmd = argv[1]
1739
  else:
1740
    # No option or command given
1741
    raise _ShowUsage(exit_error=True)
1742

    
1743
  if cmd == "--version":
1744
    raise _ShowVersion()
1745
  elif cmd == "--help":
1746
    raise _ShowUsage(exit_error=False)
1747
  elif not (cmd in commands or cmd in aliases):
1748
    raise _ShowUsage(exit_error=True)
1749

    
1750
  # get command, unalias it, and look it up in commands
1751
  if cmd in aliases:
1752
    if aliases[cmd] not in commands:
1753
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1754
                                   " command '%s'" % (cmd, aliases[cmd]))
1755

    
1756
    cmd = aliases[cmd]
1757

    
1758
  if cmd in env_override:
1759
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1760
    env_args = os.environ.get(args_env_name)
1761
    if env_args:
1762
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1763

    
1764
  func, args_def, parser_opts, usage, description = commands[cmd]
1765
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1766
                        description=description,
1767
                        formatter=TitledHelpFormatter(),
1768
                        usage="%%prog %s %s" % (cmd, usage))
1769
  parser.disable_interspersed_args()
1770
  options, args = parser.parse_args(args=argv[2:])
1771

    
1772
  if not _CheckArguments(cmd, args_def, args):
1773
    return None, None, None
1774

    
1775
  return func, options, args
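
# Example (illustrative, with a made-up binary name): the environment
# variable consulted for default arguments is derived from the binary and
# command name, i.e.
#
#   ("%s_%s" % ("gnt-example".replace("-", "_"), "list")).upper()
#   == "GNT_EXAMPLE_LIST"
#
# so "GNT_EXAMPLE_LIST='-o name' gnt-example list" behaves like
# "gnt-example list -o name" (provided "list" is listed in env_override).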
1776

    
1777

    
1778
def _FormatUsage(binary, commands):
1779
  """Generates a nice description of all commands.
1780

1781
  @param binary: Script name
1782
  @param commands: Dictionary containing command definitions
1783

1784
  """
1785
  # compute the max line length for cmd + usage
1786
  mlen = min(60, max(map(len, commands)))
1787

    
1788
  yield "Usage: %s {command} [options...] [argument...]" % binary
1789
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1790
  yield ""
1791
  yield "Commands:"
1792

    
1793
  # and format a nice command list
1794
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1795
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1796
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1797
    for line in help_lines:
1798
      yield " %-*s   %s" % (mlen, "", line)
1799

    
1800
  yield ""
1801

    
1802

    
1803
def _CheckArguments(cmd, args_def, args):
1804
  """Verifies the arguments using the argument definition.
1805

1806
  Algorithm:
1807

1808
    1. Abort with error if values specified by user but none expected.
1809

1810
    1. For each argument in definition
1811

1812
      1. Keep running count of minimum number of values (min_count)
1813
      1. Keep running count of maximum number of values (max_count)
1814
      1. If it has an unlimited number of values
1815

1816
        1. Abort with error if it's not the last argument in the definition
1817

1818
    1. If last argument has limited number of values
1819

1820
      1. Abort with error if number of values doesn't match or is too large
1821

1822
    1. Abort with error if user didn't pass enough values (min_count)
1823

1824
  """
1825
  if args and not args_def:
1826
    ToStderr("Error: Command %s expects no arguments", cmd)
1827
    return False
1828

    
1829
  min_count = None
1830
  max_count = None
1831
  check_max = None
1832

    
1833
  last_idx = len(args_def) - 1
1834

    
1835
  for idx, arg in enumerate(args_def):
1836
    if min_count is None:
1837
      min_count = arg.min
1838
    elif arg.min is not None:
1839
      min_count += arg.min
1840

    
1841
    if max_count is None:
1842
      max_count = arg.max
1843
    elif arg.max is not None:
1844
      max_count += arg.max
1845

    
1846
    if idx == last_idx:
1847
      check_max = (arg.max is not None)
1848

    
1849
    elif arg.max is None:
1850
      raise errors.ProgrammerError("Only the last argument can have max=None")
1851

    
1852
  if check_max:
1853
    # Command with exact number of arguments
1854
    if (min_count is not None and max_count is not None and
1855
        min_count == max_count and len(args) != min_count):
1856
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1857
      return False
1858

    
1859
    # Command with limited number of arguments
1860
    if max_count is not None and len(args) > max_count:
1861
      ToStderr("Error: Command %s expects only %d argument(s)",
1862
               cmd, max_count)
1863
      return False
1864

    
1865
  # Command with some required arguments
1866
  if min_count is not None and len(args) < min_count:
1867
    ToStderr("Error: Command %s expects at least %d argument(s)",
1868
             cmd, min_count)
1869
    return False
1870

    
1871
  return True
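
# Worked example (illustrative; "Arg" stands for the argument definition
# objects used above, which only need "min" and "max" attributes):
#
#   args_def = [Arg(min=1, max=1), Arg(min=0, max=None)]
#
# accumulates min_count=1, and the unlimited last argument disables the
# maximum check (check_max is False), so any command line with at least one
# positional argument is accepted; an unlimited (max=None) argument anywhere
# but in last position raises ProgrammerError instead.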
1872

    
1873

    
1874
def SplitNodeOption(value):
1875
  """Splits the value of a --node option.
1876

1877
  """
1878
  if value and ":" in value:
1879
    return value.split(":", 1)
1880
  else:
1881
    return (value, None)
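
# Illustrative examples (hypothetical node names); note that the two-node
# form yields a list while the single-node form yields a tuple:
#
#   >>> SplitNodeOption("node1.example.com:node2.example.com")
#   ['node1.example.com', 'node2.example.com']
#   >>> SplitNodeOption("node1.example.com")
#   ('node1.example.com', None)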
1882

    
1883

    
1884
def CalculateOSNames(os_name, os_variants):
1885
  """Calculates all the names an OS can be called, according to its variants.
1886

1887
  @type os_name: string
1888
  @param os_name: base name of the os
1889
  @type os_variants: list or None
1890
  @param os_variants: list of supported variants
1891
  @rtype: list
1892
  @return: list of valid names
1893

1894
  """
1895
  if os_variants:
1896
    return ["%s+%s" % (os_name, v) for v in os_variants]
1897
  else:
1898
    return [os_name]
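
# Illustrative examples (OS name and variants are made up):
#
#   >>> CalculateOSNames("debootstrap", ["wheezy", "jessie"])
#   ['debootstrap+wheezy', 'debootstrap+jessie']
#   >>> CalculateOSNames("debootstrap", None)
#   ['debootstrap']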
1899

    
1900

    
1901
def ParseFields(selected, default):
1902
  """Parses the values of "--field"-like options.
1903

1904
  @type selected: string or None
1905
  @param selected: User-selected options
1906
  @type default: list
1907
  @param default: Default fields
1908

1909
  """
1910
  if selected is None:
1911
    return default
1912

    
1913
  if selected.startswith("+"):
1914
    return default + selected[1:].split(",")
1915

    
1916
  return selected.split(",")
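
# Illustrative examples (field names are made up):
#
#   >>> ParseFields(None, ["name", "status"])
#   ['name', 'status']
#   >>> ParseFields("+ip", ["name", "status"])
#   ['name', 'status', 'ip']
#   >>> ParseFields("name,ip", ["name", "status"])
#   ['name', 'ip']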
1917

    
1918

    
1919
UsesRPC = rpc.RunWithRPC
1920

    
1921

    
1922
def AskUser(text, choices=None):
1923
  """Ask the user a question.
1924

1925
  @param text: the question to ask
1926

1927
  @param choices: list of tuples (input_char, return_value,
1928
      description); if not given, it will default to: [('y', True,
1929
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1930
      note that the '?' char is reserved for help
1931

1932
  @return: one of the return values from the choices list; if input is
1933
      not possible (i.e. not running with a tty), we return the last
1934
      entry from the list
1935

1936
  """
1937
  if choices is None:
1938
    choices = [("y", True, "Perform the operation"),
1939
               ("n", False, "Do not perform the operation")]
1940
  if not choices or not isinstance(choices, list):
1941
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1942
  for entry in choices:
1943
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1944
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1945

    
1946
  answer = choices[-1][1]
1947
  new_text = []
1948
  for line in text.splitlines():
1949
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1950
  text = "\n".join(new_text)
1951
  try:
1952
    f = file("/dev/tty", "a+")
1953
  except IOError:
1954
    return answer
1955
  try:
1956
    chars = [entry[0] for entry in choices]
1957
    chars[-1] = "[%s]" % chars[-1]
1958
    chars.append("?")
1959
    maps = dict([(entry[0], entry[1]) for entry in choices])
1960
    while True:
1961
      f.write(text)
1962
      f.write("\n")
1963
      f.write("/".join(chars))
1964
      f.write(": ")
1965
      line = f.readline(2).strip().lower()
1966
      if line in maps:
1967
        answer = maps[line]
1968
        break
1969
      elif line == "?":
1970
        for entry in choices:
1971
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1972
        f.write("\n")
1973
        continue
1974
  finally:
1975
    f.close()
1976
  return answer
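

# A minimal usage sketch (not called anywhere in this module; the question
# and choices below are made up and only illustrate the
# (input_char, return_value, description) format):
def _ExampleAskUser():
  """Example only: asks a yes/no/abort style question."""
  return AskUser("Destroy the instance's disks?",
                 choices=[("y", True, "Destroy the disks"),
                          ("n", False, "Keep the disks"),
                          ("a", "abort", "Abort the whole operation")])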
1977

    
1978

    
1979
class JobSubmittedException(Exception):
1980
  """Job was submitted, client should exit.
1981

1982
  This exception has one argument, the ID of the job that was
1983
  submitted. The handler should print this ID.
1984

1985
  This is not an error, just a structured way to exit from clients.
1986

1987
  """
1988

    
1989

    
1990
def SendJob(ops, cl=None):
1991
  """Function to submit an opcode without waiting for the results.
1992

1993
  @type ops: list
1994
  @param ops: list of opcodes
1995
  @type cl: luxi.Client
1996
  @param cl: the luxi client to use for communicating with the master;
1997
             if None, a new client will be created
1998

1999
  """
2000
  if cl is None:
2001
    cl = GetClient()
2002

    
2003
  job_id = cl.SubmitJob(ops)
2004

    
2005
  return job_id
2006

    
2007

    
2008
def GenericPollJob(job_id, cbs, report_cbs):
2009
  """Generic job-polling function.
2010

2011
  @type job_id: number
2012
  @param job_id: Job ID
2013
  @type cbs: Instance of L{JobPollCbBase}
2014
  @param cbs: Data callbacks
2015
  @type report_cbs: Instance of L{JobPollReportCbBase}
2016
  @param report_cbs: Reporting callbacks
2017

2018
  """
2019
  prev_job_info = None
2020
  prev_logmsg_serial = None
2021

    
2022
  status = None
2023

    
2024
  while True:
2025
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2026
                                      prev_logmsg_serial)
2027
    if not result:
2028
      # job not found, go away!
2029
      raise errors.JobLost("Job with id %s lost" % job_id)
2030

    
2031
    if result == constants.JOB_NOTCHANGED:
2032
      report_cbs.ReportNotChanged(job_id, status)
2033

    
2034
      # Wait again
2035
      continue
2036

    
2037
    # Split result, a tuple of (field values, log entries)
2038
    (job_info, log_entries) = result
2039
    (status, ) = job_info
2040

    
2041
    if log_entries:
2042
      for log_entry in log_entries:
2043
        (serial, timestamp, log_type, message) = log_entry
2044
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2045
                                    log_type, message)
2046
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2047

    
2048
    # TODO: Handle canceled and archived jobs
2049
    elif status in (constants.JOB_STATUS_SUCCESS,
2050
                    constants.JOB_STATUS_ERROR,
2051
                    constants.JOB_STATUS_CANCELING,
2052
                    constants.JOB_STATUS_CANCELED):
2053
      break
2054

    
2055
    prev_job_info = job_info
2056

    
2057
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2058
  if not jobs:
2059
    raise errors.JobLost("Job with id %s lost" % job_id)
2060

    
2061
  status, opstatus, result = jobs[0]
2062

    
2063
  if status == constants.JOB_STATUS_SUCCESS:
2064
    return result
2065

    
2066
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2067
    raise errors.OpExecError("Job was canceled")
2068

    
2069
  has_ok = False
2070
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2071
    if status == constants.OP_STATUS_SUCCESS:
2072
      has_ok = True
2073
    elif status == constants.OP_STATUS_ERROR:
2074
      errors.MaybeRaise(msg)
2075

    
2076
      if has_ok:
2077
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2078
                                 (idx, msg))
2079

    
2080
      raise errors.OpExecError(str(msg))
2081

    
2082
  # default failure mode
2083
  raise errors.OpExecError(result)
2084

    
2085

    
2086
class JobPollCbBase:
2087
  """Base class for L{GenericPollJob} callbacks.
2088

2089
  """
2090
  def __init__(self):
2091
    """Initializes this class.
2092

2093
    """
2094

    
2095
  def WaitForJobChangeOnce(self, job_id, fields,
2096
                           prev_job_info, prev_log_serial):
2097
    """Waits for changes on a job.
2098

2099
    """
2100
    raise NotImplementedError()
2101

    
2102
  def QueryJobs(self, job_ids, fields):
2103
    """Returns the selected fields for the selected job IDs.
2104

2105
    @type job_ids: list of numbers
2106
    @param job_ids: Job IDs
2107
    @type fields: list of strings
2108
    @param fields: Fields
2109

2110
    """
2111
    raise NotImplementedError()
2112

    
2113

    
2114
class JobPollReportCbBase:
2115
  """Base class for L{GenericPollJob} reporting callbacks.
2116

2117
  """
2118
  def __init__(self):
2119
    """Initializes this class.
2120

2121
    """
2122

    
2123
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2124
    """Handles a log message.
2125

2126
    """
2127
    raise NotImplementedError()
2128

    
2129
  def ReportNotChanged(self, job_id, status):
2130
    """Called for if a job hasn't changed in a while.
2131

2132
    @type job_id: number
2133
    @param job_id: Job ID
2134
    @type status: string or None
2135
    @param status: Job status if available
2136

2137
    """
2138
    raise NotImplementedError()
2139

    
2140

    
2141
class _LuxiJobPollCb(JobPollCbBase):
2142
  def __init__(self, cl):
2143
    """Initializes this class.
2144

2145
    """
2146
    JobPollCbBase.__init__(self)
2147
    self.cl = cl
2148

    
2149
  def WaitForJobChangeOnce(self, job_id, fields,
2150
                           prev_job_info, prev_log_serial):
2151
    """Waits for changes on a job.
2152

2153
    """
2154
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2155
                                        prev_job_info, prev_log_serial)
2156

    
2157
  def QueryJobs(self, job_ids, fields):
2158
    """Returns the selected fields for the selected job IDs.
2159

2160
    """
2161
    return self.cl.QueryJobs(job_ids, fields)
2162

    
2163

    
2164
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2165
  def __init__(self, feedback_fn):
2166
    """Initializes this class.
2167

2168
    """
2169
    JobPollReportCbBase.__init__(self)
2170

    
2171
    self.feedback_fn = feedback_fn
2172

    
2173
    assert callable(feedback_fn)
2174

    
2175
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2176
    """Handles a log message.
2177

2178
    """
2179
    self.feedback_fn((timestamp, log_type, log_msg))
2180

    
2181
  def ReportNotChanged(self, job_id, status):
2182
    """Called if a job hasn't changed in a while.
2183

2184
    """
2185
    # Ignore
2186

    
2187

    
2188
class StdioJobPollReportCb(JobPollReportCbBase):
2189
  def __init__(self):
2190
    """Initializes this class.
2191

2192
    """
2193
    JobPollReportCbBase.__init__(self)
2194

    
2195
    self.notified_queued = False
2196
    self.notified_waitlock = False
2197

    
2198
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2199
    """Handles a log message.
2200

2201
    """
2202
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2203
             FormatLogMessage(log_type, log_msg))
2204

    
2205
  def ReportNotChanged(self, job_id, status):
2206
    """Called if a job hasn't changed in a while.
2207

2208
    """
2209
    if status is None:
2210
      return
2211

    
2212
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2213
      ToStderr("Job %s is waiting in queue", job_id)
2214
      self.notified_queued = True
2215

    
2216
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2217
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2218
      self.notified_waitlock = True
2219

    
2220

    
2221
def FormatLogMessage(log_type, log_msg):
2222
  """Formats a job message according to its type.
2223

2224
  """
2225
  if log_type != constants.ELOG_MESSAGE:
2226
    log_msg = str(log_msg)
2227

    
2228
  return utils.SafeEncode(log_msg)
2229

    
2230

    
2231
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2232
  """Function to poll for the result of a job.
2233

2234
  @type job_id: job identifier
2235
  @param job_id: the job to poll for results
2236
  @type cl: luxi.Client
2237
  @param cl: the luxi client to use for communicating with the master;
2238
             if None, a new client will be created
2239

2240
  """
2241
  if cl is None:
2242
    cl = GetClient()
2243

    
2244
  if reporter is None:
2245
    if feedback_fn:
2246
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2247
    else:
2248
      reporter = StdioJobPollReportCb()
2249
  elif feedback_fn:
2250
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2251

    
2252
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
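

# Illustrative sketch of the submit-then-poll pattern provided by the helpers
# above; it is not used by this module, and the lambda simply prints the
# message part of each (timestamp, log_type, log_msg) feedback tuple.
def _ExampleSendAndPoll(op):
  """Example only: submits a single opcode and waits for its result."""
  cl = GetClient()
  job_id = SendJob([op], cl=cl)
  return PollJob(job_id, cl=cl,
                 feedback_fn=lambda entry: ToStdout("%s", entry[2]))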
2253

    
2254

    
2255
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2256
  """Legacy function to submit an opcode.
2257

2258
  This is just a simple wrapper over the construction of the processor
2259
  instance. It should be extended to better handle feedback and
2260
  interaction functions.
2261

2262
  """
2263
  if cl is None:
2264
    cl = GetClient()
2265

    
2266
  SetGenericOpcodeOpts([op], opts)
2267

    
2268
  job_id = SendJob([op], cl=cl)
2269
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2270
    ToStdout("%d" % job_id)
2271

    
2272
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2273
                       reporter=reporter)
2274

    
2275
  return op_results[0]
2276

    
2277

    
2278
def SubmitOpCodeToDrainedQueue(op):
2279
  """Forcefully insert a job in the queue, even if it is drained.
2280

2281
  """
2282
  cl = GetClient()
2283
  job_id = cl.SubmitJobToDrainedQueue([op])
2284
  op_results = PollJob(job_id, cl=cl)
2285
  return op_results[0]
2286

    
2287

    
2288
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2289
  """Wrapper around SubmitOpCode or SendJob.
2290

2291
  This function will decide, based on the 'opts' parameter, whether to
2292
  submit and wait for the result of the opcode (and return it), or
2293
  whether to just send the job and print its identifier. It is used in
2294
  order to simplify the implementation of the '--submit' option.
2295

2296
  It will also process the opcodes if we're sending them via SendJob
2297
  (otherwise SubmitOpCode does it).
2298

2299
  """
2300
  if opts and opts.submit_only:
2301
    job = [op]
2302
    SetGenericOpcodeOpts(job, opts)
2303
    job_id = SendJob(job, cl=cl)
2304
    if opts.print_jobid:
2305
      ToStdout("%d" % job_id)
2306
    raise JobSubmittedException(job_id)
2307
  else:
2308
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2309

    
2310

    
2311
def _InitReasonTrail(op, opts):
2312
  """Builds the first part of the reason trail
2313

2314
  Builds the initial part of the reason trail, adding the user provided reason
2315
  (if it exists) and the name of the command starting the operation.
2316

2317
  @param op: the opcode the reason trail will be added to
2318
  @param opts: the command line options selected by the user
2319

2320
  """
2321
  assert len(sys.argv) >= 2
2322
  trail = []
2323

    
2324
  if opts.reason:
2325
    trail.append((constants.OPCODE_REASON_SRC_USER,
2326
                  opts.reason,
2327
                  utils.EpochNano()))
2328

    
2329
  binary = os.path.basename(sys.argv[0])
2330
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2331
  command = sys.argv[1]
2332
  trail.append((source, command, utils.EpochNano()))
2333
  op.reason = trail
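
# Illustrative example: an invocation along the lines of
# "gnt-instance shutdown --reason 'planned work' inst1.example.com" would
# produce a trail roughly like
#
#   [(constants.OPCODE_REASON_SRC_USER, "planned work", <nanosecond time>),
#    ("%s:gnt-instance" % constants.OPCODE_REASON_SRC_CLIENT, "shutdown",
#     <nanosecond time>)]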
2334

    
2335

    
2336
def SetGenericOpcodeOpts(opcode_list, options):
2337
  """Processor for generic options.
2338

2339
  This function updates the given opcodes based on generic command
2340
  line options (like debug, dry-run, etc.).
2341

2342
  @param opcode_list: list of opcodes
2343
  @param options: command line options or None
2344
  @return: None (in-place modification)
2345

2346
  """
2347
  if not options:
2348
    return
2349
  for op in opcode_list:
2350
    op.debug_level = options.debug
2351
    if hasattr(options, "dry_run"):
2352
      op.dry_run = options.dry_run
2353
    if getattr(options, "priority", None) is not None:
2354
      op.priority = options.priority
2355
    _InitReasonTrail(op, options)
2356

    
2357

    
2358
def GetClient(query=False):
2359
  """Connects to the a luxi socket and returns a client.
2360

2361
  @type query: boolean
2362
  @param query: this signifies that the client will only be
2363
      used for queries; if the build-time parameter
2364
      enable-split-queries is enabled, then the client will be
2365
      connected to the query socket instead of the masterd socket
2366

2367
  """
2368
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2369
  if override_socket:
2370
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2371
      address = pathutils.MASTER_SOCKET
2372
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2373
      address = pathutils.QUERY_SOCKET
2374
    else:
2375
      address = override_socket
2376
  elif query and constants.ENABLE_SPLIT_QUERY:
2377
    address = pathutils.QUERY_SOCKET
2378
  else:
2379
    address = None
2380
  # TODO: Cache object?
2381
  try:
2382
    client = luxi.Client(address=address)
2383
  except luxi.NoMasterError:
2384
    ss = ssconf.SimpleStore()
2385

    
2386
    # Try to read ssconf file
2387
    try:
2388
      ss.GetMasterNode()
2389
    except errors.ConfigurationError:
2390
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2391
                                 " not part of a cluster",
2392
                                 errors.ECODE_INVAL)
2393

    
2394
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2395
    if master != myself:
2396
      raise errors.OpPrereqError("This is not the master node, please connect"
2397
                                 " to node '%s' and rerun the command" %
2398
                                 master, errors.ECODE_INVAL)
2399
    raise
2400
  return client
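
# Typical usage sketch (QueryConfigValues is the same luxi call used further
# below in RunWhileClusterStopped):
#
#   cl = GetClient(query=True)
#   (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])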
2401

    
2402

    
2403
def FormatError(err):
2404
  """Return a formatted error message for a given error.
2405

2406
  This function takes an exception instance and returns a tuple
2407
  consisting of two values: first, the recommended exit code, and
2408
  second, a string describing the error message (not
2409
  newline-terminated).
2410

2411
  """
2412
  retcode = 1
2413
  obuf = StringIO()
2414
  msg = str(err)
2415
  if isinstance(err, errors.ConfigurationError):
2416
    txt = "Corrupt configuration file: %s" % msg
2417
    logging.error(txt)
2418
    obuf.write(txt + "\n")
2419
    obuf.write("Aborting.")
2420
    retcode = 2
2421
  elif isinstance(err, errors.HooksAbort):
2422
    obuf.write("Failure: hooks execution failed:\n")
2423
    for node, script, out in err.args[0]:
2424
      if out:
2425
        obuf.write("  node: %s, script: %s, output: %s\n" %
2426
                   (node, script, out))
2427
      else:
2428
        obuf.write("  node: %s, script: %s (no output)\n" %
2429
                   (node, script))
2430
  elif isinstance(err, errors.HooksFailure):
2431
    obuf.write("Failure: hooks general failure: %s" % msg)
2432
  elif isinstance(err, errors.ResolverError):
2433
    this_host = netutils.Hostname.GetSysName()
2434
    if err.args[0] == this_host:
2435
      msg = "Failure: can't resolve my own hostname ('%s')"
2436
    else:
2437
      msg = "Failure: can't resolve hostname '%s'"
2438
    obuf.write(msg % err.args[0])
2439
  elif isinstance(err, errors.OpPrereqError):
2440
    if len(err.args) == 2:
2441
      obuf.write("Failure: prerequisites not met for this"
2442
                 " operation:\nerror type: %s, error details:\n%s" %
2443
                 (err.args[1], err.args[0]))
2444
    else:
2445
      obuf.write("Failure: prerequisites not met for this"
2446
                 " operation:\n%s" % msg)
2447
  elif isinstance(err, errors.OpExecError):
2448
    obuf.write("Failure: command execution error:\n%s" % msg)
2449
  elif isinstance(err, errors.TagError):
2450
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2451
  elif isinstance(err, errors.JobQueueDrainError):
2452
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2453
               " accept new requests\n")
2454
  elif isinstance(err, errors.JobQueueFull):
2455
    obuf.write("Failure: the job queue is full and doesn't accept new"
2456
               " job submissions until old jobs are archived\n")
2457
  elif isinstance(err, errors.TypeEnforcementError):
2458
    obuf.write("Parameter Error: %s" % msg)
2459
  elif isinstance(err, errors.ParameterError):
2460
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2461
  elif isinstance(err, luxi.NoMasterError):
2462
    if err.args[0] == pathutils.MASTER_SOCKET:
2463
      daemon = "the master daemon"
2464
    elif err.args[0] == pathutils.QUERY_SOCKET:
2465
      daemon = "the config daemon"
2466
    else:
2467
      daemon = "socket '%s'" % str(err.args[0])
2468
    obuf.write("Cannot communicate with %s.\nIs the process running"
2469
               " and listening for connections?" % daemon)
2470
  elif isinstance(err, luxi.TimeoutError):
2471
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2472
               " been submitted and will continue to run even if the call"
2473
               " timed out. Useful commands in this situation are \"gnt-job"
2474
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2475
    obuf.write(msg)
2476
  elif isinstance(err, luxi.PermissionError):
2477
    obuf.write("It seems you don't have permissions to connect to the"
2478
               " master daemon.\nPlease retry as a different user.")
2479
  elif isinstance(err, luxi.ProtocolError):
2480
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2481
               "%s" % msg)
2482
  elif isinstance(err, errors.JobLost):
2483
    obuf.write("Error checking job status: %s" % msg)
2484
  elif isinstance(err, errors.QueryFilterParseError):
2485
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2486
    obuf.write("\n".join(err.GetDetails()))
2487
  elif isinstance(err, errors.GenericError):
2488
    obuf.write("Unhandled Ganeti error: %s" % msg)
2489
  elif isinstance(err, JobSubmittedException):
2490
    obuf.write("JobID: %s\n" % err.args[0])
2491
    retcode = 0
2492
  else:
2493
    obuf.write("Unhandled exception: %s" % msg)
2494
  return retcode, obuf.getvalue().rstrip("\n")
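

# Illustrative sketch of the intended calling pattern (GenericMain below does
# essentially this): turn an exception into an exit code plus a message that
# is printed on stderr.
def _ExampleHandleError(err):
  """Example only: reports an error and returns the suggested exit code."""
  retcode, err_msg = FormatError(err)
  ToStderr(err_msg)
  return retcode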
2495

    
2496

    
2497
def GenericMain(commands, override=None, aliases=None,
2498
                env_override=frozenset()):
2499
  """Generic main function for all the gnt-* commands.
2500

2501
  @param commands: a dictionary with a special structure, see the design doc
2502
                   for command line handling.
2503
  @param override: if not None, we expect a dictionary with keys that will
2504
                   override command line options; this can be used to pass
2505
                   options from the scripts to generic functions
2506
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2507
  @param env_override: list of environment names which are allowed to submit
2508
                       default args for commands
2509

2510
  """
2511
  # save the program name and the entire command line for later logging
2512
  if sys.argv:
2513
    binary = os.path.basename(sys.argv[0])
2514
    if not binary:
2515
      binary = sys.argv[0]
2516

    
2517
    if len(sys.argv) >= 2:
2518
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2519
    else:
2520
      logname = binary
2521

    
2522
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2523
  else:
2524
    binary = "<unknown program>"
2525
    cmdline = "<unknown>"
2526

    
2527
  if aliases is None:
2528
    aliases = {}
2529

    
2530
  try:
2531
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2532
                                       env_override)
2533
  except _ShowVersion:
2534
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2535
             constants.RELEASE_VERSION)
2536
    return constants.EXIT_SUCCESS
2537
  except _ShowUsage, err:
2538
    for line in _FormatUsage(binary, commands):
2539
      ToStdout(line)
2540

    
2541
    if err.exit_error:
2542
      return constants.EXIT_FAILURE
2543
    else:
2544
      return constants.EXIT_SUCCESS
2545
  except errors.ParameterError, err:
2546
    result, err_msg = FormatError(err)
2547
    ToStderr(err_msg)
2548
    return 1
2549

    
2550
  if func is None: # parse error
2551
    return 1
2552

    
2553
  if override is not None:
2554
    for key, val in override.iteritems():
2555
      setattr(options, key, val)
2556

    
2557
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2558
                     stderr_logging=True)
2559

    
2560
  logging.info("Command line: %s", cmdline)
2561

    
2562
  try:
2563
    result = func(options, args)
2564
  except (errors.GenericError, luxi.ProtocolError,
2565
          JobSubmittedException), err:
2566
    result, err_msg = FormatError(err)
2567
    logging.exception("Error during command processing")
2568
    ToStderr(err_msg)
2569
  except KeyboardInterrupt:
2570
    result = constants.EXIT_FAILURE
2571
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2572
             " might have been submitted and"
2573
             " will continue to run in the background.")
2574
  except IOError, err:
2575
    if err.errno == errno.EPIPE:
2576
      # our terminal went away, we'll exit
2577
      sys.exit(constants.EXIT_FAILURE)
2578
    else:
2579
      raise
2580

    
2581
  return result
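
# Illustrative sketch of a hypothetical gnt-* style client built on top of
# GenericMain; "ExampleList" and its option list are made up, but the 5-tuple
# layout (handler, argument definitions, extra options, usage suffix, help
# text) is the one expected in the commands dictionary:
#
#   def ExampleList(opts, args):
#     ToStdout("interval=%s", opts.interval)
#     return constants.EXIT_SUCCESS
#
#   commands = {
#     "list": (ExampleList, [], [INTERVAL_OPT], "", "List example objects"),
#     }
#
#   sys.exit(GenericMain(commands, env_override=frozenset(["list"])))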
2582

    
2583

    
2584
def ParseNicOption(optvalue):
2585
  """Parses the value of the --net option(s).
2586

2587
  """
2588
  try:
2589
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2590
  except (TypeError, ValueError), err:
2591
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2592
                               errors.ECODE_INVAL)
2593

    
2594
  nics = [{}] * nic_max
2595
  for nidx, ndict in optvalue:
2596
    nidx = int(nidx)
2597

    
2598
    if not isinstance(ndict, dict):
2599
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2600
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2601

    
2602
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2603

    
2604
    nics[nidx] = ndict
2605

    
2606
  return nics
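
# Illustrative example (assuming "link" and "mode" are valid NIC parameters):
# the input is the list of (index, settings) pairs produced by the --net
# option, the output is a list indexed by NIC number, with empty dicts for
# NICs that were not mentioned:
#
#   >>> ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#   [{'link': 'br0'}, {}, {'mode': 'routed'}]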
2607

    
2608

    
2609
def GenericInstanceCreate(mode, opts, args):
2610
  """Add an instance to the cluster via either creation or import.
2611

2612
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2613
  @param opts: the command line options selected by the user
2614
  @type args: list
2615
  @param args: should contain only one element, the new instance name
2616
  @rtype: int
2617
  @return: the desired exit code
2618

2619
  """
2620
  instance = args[0]
2621

    
2622
  (pnode, snode) = SplitNodeOption(opts.node)
2623

    
2624
  hypervisor = None
2625
  hvparams = {}
2626
  if opts.hypervisor:
2627
    hypervisor, hvparams = opts.hypervisor
2628

    
2629
  if opts.nics:
2630
    nics = ParseNicOption(opts.nics)
2631
  elif opts.no_nics:
2632
    # no nics
2633
    nics = []
2634
  elif mode == constants.INSTANCE_CREATE:
2635
    # default of one nic, all auto
2636
    nics = [{}]
2637
  else:
2638
    # mode == import
2639
    nics = []
2640

    
2641
  if opts.disk_template == constants.DT_DISKLESS:
2642
    if opts.disks or opts.sd_size is not None:
2643
      raise errors.OpPrereqError("Diskless instance but disk"
2644
                                 " information passed", errors.ECODE_INVAL)
2645
    disks = []
2646
  else:
2647
    if (not opts.disks and not opts.sd_size
2648
        and mode == constants.INSTANCE_CREATE):
2649
      raise errors.OpPrereqError("No disk information specified",
2650
                                 errors.ECODE_INVAL)
2651
    if opts.disks and opts.sd_size is not None:
2652
      raise errors.OpPrereqError("Please use either the '--disk' or"
2653
                                 " '-s' option", errors.ECODE_INVAL)
2654
    if opts.sd_size is not None:
2655
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2656

    
2657
    if opts.disks:
2658
      try:
2659
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2660
      except ValueError, err:
2661
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2662
                                   errors.ECODE_INVAL)
2663
      disks = [{}] * disk_max
2664
    else:
2665
      disks = []
2666
    for didx, ddict in opts.disks:
2667
      didx = int(didx)
2668
      if not isinstance(ddict, dict):
2669
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2670
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2671
      elif constants.IDISK_SIZE in ddict:
2672
        if constants.IDISK_ADOPT in ddict:
2673
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2674
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2675
        try:
2676
          ddict[constants.IDISK_SIZE] = \
2677
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2678
        except ValueError, err:
2679
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2680
                                     (didx, err), errors.ECODE_INVAL)
2681
      elif constants.IDISK_ADOPT in ddict:
2682
        if constants.IDISK_SPINDLES in ddict:
2683
          raise errors.OpPrereqError("spindles is not a valid option when"
2684
                                     " adopting a disk", errors.ECODE_INVAL)
2685
        if mode == constants.INSTANCE_IMPORT:
2686
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2687
                                     " import", errors.ECODE_INVAL)
2688
        ddict[constants.IDISK_SIZE] = 0
2689
      else:
2690
        raise errors.OpPrereqError("Missing size or adoption source for"
2691
                                   " disk %d" % didx, errors.ECODE_INVAL)
2692
      disks[didx] = ddict
2693

    
2694
  if opts.tags is not None:
2695
    tags = opts.tags.split(",")
2696
  else:
2697
    tags = []
2698

    
2699
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2700
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2701

    
2702
  if mode == constants.INSTANCE_CREATE:
2703
    start = opts.start
2704
    os_type = opts.os
2705
    force_variant = opts.force_variant
2706
    src_node = None
2707
    src_path = None
2708
    no_install = opts.no_install
2709
    identify_defaults = False
2710
    compress = constants.IEC_NONE
2711
  elif mode == constants.INSTANCE_IMPORT:
2712
    start = False
2713
    os_type = None
2714
    force_variant = False
2715
    src_node = opts.src_node
2716
    src_path = opts.src_dir
2717
    no_install = None
2718
    identify_defaults = opts.identify_defaults
2719
    compress = opts.compress
2720
  else:
2721
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2722

    
2723
  op = opcodes.OpInstanceCreate(instance_name=instance,
2724
                                disks=disks,
2725
                                disk_template=opts.disk_template,
2726
                                nics=nics,
2727
                                conflicts_check=opts.conflicts_check,
2728
                                pnode=pnode, snode=snode,
2729
                                ip_check=opts.ip_check,
2730
                                name_check=opts.name_check,
2731
                                wait_for_sync=opts.wait_for_sync,
2732
                                file_storage_dir=opts.file_storage_dir,
2733
                                file_driver=opts.file_driver,
2734
                                iallocator=opts.iallocator,
2735
                                hypervisor=hypervisor,
2736
                                hvparams=hvparams,
2737
                                beparams=opts.beparams,
2738
                                osparams=opts.osparams,
2739
                                mode=mode,
2740
                                start=start,
2741
                                os_type=os_type,
2742
                                force_variant=force_variant,
2743
                                src_node=src_node,
2744
                                src_path=src_path,
2745
                                compress=compress,
2746
                                tags=tags,
2747
                                no_install=no_install,
2748
                                identify_defaults=identify_defaults,
2749
                                ignore_ipolicy=opts.ignore_ipolicy)
2750

    
2751
  SubmitOrSend(op, opts)
2752
  return 0
2753

    
2754

    
2755
class _RunWhileClusterStoppedHelper:
2756
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2757

2758
  """
2759
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2760
    """Initializes this class.
2761

2762
    @type feedback_fn: callable
2763
    @param feedback_fn: Feedback function
2764
    @type cluster_name: string
2765
    @param cluster_name: Cluster name
2766
    @type master_node: string
2767
    @param master_node: Master node name
2768
    @type online_nodes: list
2769
    @param online_nodes: List of names of online nodes
2770

2771
    """
2772
    self.feedback_fn = feedback_fn
2773
    self.cluster_name = cluster_name
2774
    self.master_node = master_node
2775
    self.online_nodes = online_nodes
2776

    
2777
    self.ssh = ssh.SshRunner(self.cluster_name)
2778

    
2779
    self.nonmaster_nodes = [name for name in online_nodes
2780
                            if name != master_node]
2781

    
2782
    assert self.master_node not in self.nonmaster_nodes
2783

    
2784
  def _RunCmd(self, node_name, cmd):
2785
    """Runs a command on the local or a remote machine.
2786

2787
    @type node_name: string
2788
    @param node_name: Machine name
2789
    @type cmd: list
2790
    @param cmd: Command
2791

2792
    """
2793
    if node_name is None or node_name == self.master_node:
2794
      # No need to use SSH
2795
      result = utils.RunCmd(cmd)
2796
    else:
2797
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2798
                            utils.ShellQuoteArgs(cmd))
2799

    
2800
    if result.failed:
2801
      errmsg = ["Failed to run command %s" % result.cmd]
2802
      if node_name:
2803
        errmsg.append("on node %s" % node_name)
2804
      errmsg.append(": exitcode %s and error %s" %
2805
                    (result.exit_code, result.output))
2806
      raise errors.OpExecError(" ".join(errmsg))
2807

    
2808
  def Call(self, fn, *args):
2809
    """Call function while all daemons are stopped.
2810

2811
    @type fn: callable
2812
    @param fn: Function to be called
2813

2814
    """
2815
    # Pause watcher by acquiring an exclusive lock on watcher state file
2816
    self.feedback_fn("Blocking watcher")
2817
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2818
    try:
2819
      # TODO: Currently, this just blocks. There's no timeout.
2820
      # TODO: Should it be a shared lock?
2821
      watcher_block.Exclusive(blocking=True)
2822

    
2823
      # Stop master daemons, so that no new jobs can come in and all running
2824
      # ones are finished
2825
      self.feedback_fn("Stopping master daemons")
2826
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2827
      try:
2828
        # Stop daemons on all nodes
2829
        for node_name in self.online_nodes:
2830
          self.feedback_fn("Stopping daemons on %s" % node_name)
2831
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2832

    
2833
        # All daemons are shut down now
2834
        try:
2835
          return fn(self, *args)
2836
        except Exception, err:
2837
          _, errmsg = FormatError(err)
2838
          logging.exception("Caught exception")
2839
          self.feedback_fn(errmsg)
2840
          raise
2841
      finally:
2842
        # Start cluster again, master node last
2843
        for node_name in self.nonmaster_nodes + [self.master_node]:
2844
          self.feedback_fn("Starting daemons on %s" % node_name)
2845
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2846
    finally:
2847
      # Resume watcher
2848
      watcher_block.Close()
2849

    
2850

    
2851
def RunWhileClusterStopped(feedback_fn, fn, *args):
2852
  """Calls a function while all cluster daemons are stopped.
2853

2854
  @type feedback_fn: callable
2855
  @param feedback_fn: Feedback function
2856
  @type fn: callable
2857
  @param fn: Function to be called when daemons are stopped
2858

2859
  """
2860
  feedback_fn("Gathering cluster information")
2861

    
2862
  # This ensures we're running on the master daemon
2863
  cl = GetClient()
2864

    
2865
  (cluster_name, master_node) = \
2866
    cl.QueryConfigValues(["cluster_name", "master_node"])
2867

    
2868
  online_nodes = GetOnlineNodes([], cl=cl)
2869

    
2870
  # Don't keep a reference to the client. The master daemon will go away.
2871
  del cl
2872

    
2873
  assert master_node in online_nodes
2874

    
2875
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2876
                                       online_nodes).Call(fn, *args)
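

# Illustrative sketch only (hypothetical helper, not part of the original
# module): RunWhileClusterStopped passes the internal helper object as the
# first argument to the callback, followed by any extra arguments given by
# the caller.
def _ExampleRunWhileClusterStopped(feedback_fn):
  """Sketch of a callback run while all cluster daemons are down.

  """
  def _ReportNodes(helper, prefix):
    # "prefix" is a made-up extra argument, forwarded by Call()
    helper.feedback_fn("%s: %s" %
                       (prefix, utils.CommaJoin(helper.online_nodes)))

  return RunWhileClusterStopped(feedback_fn, _ReportNodes, "Nodes seen")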
2877

    
2878

    
2879
def GenerateTable(headers, fields, separator, data,
2880
                  numfields=None, unitfields=None,
2881
                  units=None):
2882
  """Prints a table with headers and different fields.
2883

2884
  @type headers: dict
2885
  @param headers: dictionary mapping field names to headers for
2886
      the table
2887
  @type fields: list
2888
  @param fields: the field names corresponding to each row in
2889
      the data field
2890
  @param separator: the separator to be used; if this is None,
2891
      the default 'smart' algorithm is used which computes optimal
2892
      field width, otherwise just the separator is used between
2893
      each field
2894
  @type data: list
2895
  @param data: a list of lists, each sublist being one row to be output
2896
  @type numfields: list
2897
  @param numfields: a list with the fields that hold numeric
2898
      values and thus should be right-aligned
2899
  @type unitfields: list
2900
  @param unitfields: a list with the fields that hold numeric
2901
      values that should be formatted with the units field
2902
  @type units: string or None
2903
  @param units: the units we should use for formatting, or None for
2904
      automatic choice (human-readable for non-separator usage, otherwise
2905
      megabytes); this is a one-letter string
2906

2907
  """
2908
  if units is None:
2909
    if separator:
2910
      units = "m"
2911
    else:
2912
      units = "h"
2913

    
2914
  if numfields is None:
2915
    numfields = []
2916
  if unitfields is None:
2917
    unitfields = []
2918

    
2919
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2920
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2921

    
2922
  format_fields = []
2923
  for field in fields:
2924
    if headers and field not in headers:
2925
      # TODO: handle better unknown fields (either revert to old
2926
      # style of raising exception, or deal more intelligently with
2927
      # variable fields)
2928
      headers[field] = field
2929
    if separator is not None:
2930
      format_fields.append("%s")
2931
    elif numfields.Matches(field):
2932
      format_fields.append("%*s")
2933
    else:
2934
      format_fields.append("%-*s")
2935

    
2936
  if separator is None:
2937
    mlens = [0 for name in fields]
2938
    format_str = " ".join(format_fields)
2939
  else:
2940
    format_str = separator.replace("%", "%%").join(format_fields)
2941

    
2942
  for row in data:
2943
    if row is None:
2944
      continue
2945
    for idx, val in enumerate(row):
2946
      if unitfields.Matches(fields[idx]):
2947
        try:
2948
          val = int(val)
2949
        except (TypeError, ValueError):
2950
          pass
2951
        else:
2952
          val = row[idx] = utils.FormatUnit(val, units)
2953
      val = row[idx] = str(val)
2954
      if separator is None:
2955
        mlens[idx] = max(mlens[idx], len(val))
2956

    
2957
  result = []
2958
  if headers:
2959
    args = []
2960
    for idx, name in enumerate(fields):
2961
      hdr = headers[name]
2962
      if separator is None:
2963
        mlens[idx] = max(mlens[idx], len(hdr))
2964
        args.append(mlens[idx])
2965
      args.append(hdr)
2966
    result.append(format_str % tuple(args))
2967

    
2968
  if separator is None:
2969
    assert len(mlens) == len(fields)
2970

    
2971
    if fields and not numfields.Matches(fields[-1]):
2972
      mlens[-1] = 0
2973

    
2974
  for line in data:
2975
    args = []
2976
    if line is None:
2977
      line = ["-" for _ in fields]
2978
    for idx in range(len(fields)):
2979
      if separator is None:
2980
        args.append(mlens[idx])
2981
      args.append(line[idx])
2982
    result.append(format_str % tuple(args))
2983

    
2984
  return result
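

# Illustrative sketch only (hypothetical helper, not part of the original
# module): direct use of GenerateTable with the "smart" width algorithm; the
# node names and values below are made up.
def _ExampleGenerateTable():
  """Formats a small table with a right-aligned, unit-formatted column.

  """
  headers = {"name": "Node", "mfree": "MemFree"}
  fields = ["name", "mfree"]
  data = [["node1.example.com", 2048], ["node2.example.com", 512]]
  # separator=None selects the smart width algorithm; units then default to
  # "h" (human-readable)
  return GenerateTable(headers, fields, None, data,
                       numfields=["mfree"], unitfields=["mfree"])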


def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
3005

    
3006

    
3007
def _GetColumnFormatter(fdef, override, unit):
3008
  """Returns formatting function for a field.
3009

3010
  @type fdef: L{objects.QueryFieldDefinition}
3011
  @type override: dict
3012
  @param override: Dictionary for overriding field formatting functions,
3013
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3014
  @type unit: string
3015
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3016
  @rtype: tuple; (callable, bool)
3017
  @return: Returns the function to format a value (takes one parameter) and a
3018
    boolean for aligning the value on the right-hand side
3019

3020
  """
3021
  fmt = override.get(fdef.name, None)
3022
  if fmt is not None:
3023
    return fmt
3024

    
3025
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3026

    
3027
  if fdef.kind == constants.QFT_UNIT:
3028
    # Can't keep this information in the static dictionary
3029
    return (lambda value: utils.FormatUnit(value, unit), True)
3030

    
3031
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3032
  if fmt is not None:
3033
    return fmt
3034

    
3035
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3036

    
3037

    
3038
class _QueryColumnFormatter:
3039
  """Callable class for formatting fields of a query.
3040

3041
  """
3042
  def __init__(self, fn, status_fn, verbose):
3043
    """Initializes this class.
3044

3045
    @type fn: callable
3046
    @param fn: Formatting function
3047
    @type status_fn: callable
3048
    @param status_fn: Function to report fields' status
3049
    @type verbose: boolean
3050
    @param verbose: whether to use verbose field descriptions or not
3051

3052
    """
3053
    self._fn = fn
3054
    self._status_fn = status_fn
3055
    self._verbose = verbose
3056

    
3057
  def __call__(self, data):
3058
    """Returns a field's string representation.
3059

3060
    """
3061
    (status, value) = data
3062

    
3063
    # Report status
3064
    self._status_fn(status)
3065

    
3066
    if status == constants.RS_NORMAL:
3067
      return self._fn(value)
3068

    
3069
    assert value is None, \
3070
           "Found value %r for abnormal status %s" % (value, status)
3071

    
3072
    return FormatResultError(status, self._verbose)
3073

    
3074

    
3075
def FormatResultError(status, verbose):
3076
  """Formats result status other than L{constants.RS_NORMAL}.
3077

3078
  @param status: The result status
3079
  @type verbose: boolean
3080
  @param verbose: Whether to return the verbose text
3081
  @return: Text of result status
3082

3083
  """
3084
  assert status != constants.RS_NORMAL, \
3085
         "FormatResultError called with status equal to constants.RS_NORMAL"
3086
  try:
3087
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3088
  except KeyError:
3089
    raise NotImplementedError("Unknown status %s" % status)
3090
  else:
3091
    if verbose:
3092
      return verbose_text
3093
    return normal_text
3094

    
3095

    
3096
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3097
                      header=False, verbose=False):
3098
  """Formats data in L{objects.QueryResponse}.
3099

3100
  @type result: L{objects.QueryResponse}
3101
  @param result: result of query operation
3102
  @type unit: string
3103
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3104
    see L{utils.text.FormatUnit}
3105
  @type format_override: dict
3106
  @param format_override: Dictionary for overriding field formatting functions,
3107
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3108
  @type separator: string or None
3109
  @param separator: String used to separate fields
3110
  @type header: bool
3111
  @param header: Whether to output header row
3112
  @type verbose: boolean
3113
  @param verbose: whether to use verbose field descriptions or not
3114

3115
  """
3116
  if unit is None:
3117
    if separator:
3118
      unit = "m"
3119
    else:
3120
      unit = "h"
3121

    
3122
  if format_override is None:
3123
    format_override = {}
3124

    
3125
  stats = dict.fromkeys(constants.RS_ALL, 0)
3126

    
3127
  def _RecordStatus(status):
3128
    if status in stats:
3129
      stats[status] += 1
3130

    
3131
  columns = []
3132
  for fdef in result.fields:
3133
    assert fdef.title and fdef.name
3134
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3135
    columns.append(TableColumn(fdef.title,
3136
                               _QueryColumnFormatter(fn, _RecordStatus,
3137
                                                     verbose),
3138
                               align_right))
3139

    
3140
  table = FormatTable(result.data, columns, header, separator)
3141

    
3142
  # Collect statistics
3143
  assert len(stats) == len(constants.RS_ALL)
3144
  assert compat.all(count >= 0 for count in stats.values())
3145

    
3146
  # Determine overall status. If there was no data, unknown fields must be
3147
  # detected via the field definitions.
3148
  if (stats[constants.RS_UNKNOWN] or
3149
      (not result.data and _GetUnknownFields(result.fields))):
3150
    status = QR_UNKNOWN
3151
  elif compat.any(count > 0 for key, count in stats.items()
3152
                  if key != constants.RS_NORMAL):
3153
    status = QR_INCOMPLETE
3154
  else:
3155
    status = QR_NORMAL
3156

    
3157
  return (status, table)
3158

    
3159

    
3160
def _GetUnknownFields(fdefs):
3161
  """Returns list of unknown fields included in C{fdefs}.
3162

3163
  @type fdefs: list of L{objects.QueryFieldDefinition}
3164

3165
  """
3166
  return [fdef for fdef in fdefs
3167
          if fdef.kind == constants.QFT_UNKNOWN]
3168

    
3169

    
3170
def _WarnUnknownFields(fdefs):
3171
  """Prints a warning to stderr if a query included unknown fields.
3172

3173
  @type fdefs: list of L{objects.QueryFieldDefinition}
3174

3175
  """
3176
  unknown = _GetUnknownFields(fdefs)
3177
  if unknown:
3178
    ToStderr("Warning: Queried for unknown fields %s",
3179
             utils.CommaJoin(fdef.name for fdef in unknown))
3180
    return True
3181

    
3182
  return False
3183

    
3184

    
3185
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3186
                format_override=None, verbose=False, force_filter=False,
3187
                namefield=None, qfilter=None, isnumeric=False):
3188
  """Generic implementation for listing all items of a resource.
3189

3190
  @param resource: One of L{constants.QR_VIA_LUXI}
3191
  @type fields: list of strings
3192
  @param fields: List of fields to query for
3193
  @type names: list of strings
3194
  @param names: Names of items to query for
3195
  @type unit: string or None
3196
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3197
    None for automatic choice (human-readable for non-separator usage,
3198
    otherwise megabytes); this is a one-letter string
3199
  @type separator: string or None
3200
  @param separator: String used to separate fields
3201
  @type header: bool
3202
  @param header: Whether to show header row
3203
  @type force_filter: bool
3204
  @param force_filter: Whether to always treat names as filter
3205
  @type format_override: dict
3206
  @param format_override: Dictionary for overriding field formatting functions,
3207
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3208
  @type verbose: boolean
3209
  @param verbose: whether to use verbose field descriptions or not
3210
  @type namefield: string
3211
  @param namefield: Name of field to use for simple filters (see
3212
    L{qlang.MakeFilter} for details)
3213
  @type qfilter: list or None
3214
  @param qfilter: Query filter (in addition to names)
3215
  @type isnumeric: bool
3216
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3217
    any simple filters built by namefield should use integer values to
3218
    reflect that
3219

3220
  """
3221
  if not names:
3222
    names = None
3223

    
3224
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3225
                                isnumeric=isnumeric)
3226

    
3227
  if qfilter is None:
3228
    qfilter = namefilter
3229
  elif namefilter is not None:
3230
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3231

    
3232
  if cl is None:
3233
    cl = GetClient()
3234

    
3235
  response = cl.Query(resource, fields, qfilter)
3236

    
3237
  found_unknown = _WarnUnknownFields(response.fields)
3238

    
3239
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3240
                                     header=header,
3241
                                     format_override=format_override,
3242
                                     verbose=verbose)
3243

    
3244
  for line in data:
3245
    ToStdout(line)
3246

    
3247
  assert ((found_unknown and status == QR_UNKNOWN) or
3248
          (not found_unknown and status != QR_UNKNOWN))
3249

    
3250
  if status == QR_UNKNOWN:
3251
    return constants.EXIT_UNKNOWN_FIELD
3252

    
3253
  # TODO: Should the list command fail if not all data could be collected?
3254
  return constants.EXIT_SUCCESS
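

# Illustrative sketch only (hypothetical helper, not part of the original
# module): the body of a list-style command built on GenericList; the field
# names are examples of node query fields.
def _ExampleListNodes(args):
  """Lists node names and offline flags, "|"-separated, without a header.

  """
  return GenericList(constants.QR_NODE, ["name", "offline"], args, None,
                     "|", False, namefield="name")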
3255

    
3256

    
3257
def _FieldDescValues(fdef):
3258
  """Helper function for L{GenericListFields} to get query field description.
3259

3260
  @type fdef: L{objects.QueryFieldDefinition}
3261
  @rtype: list
3262

3263
  """
3264
  return [
3265
    fdef.name,
3266
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3267
    fdef.title,
3268
    fdef.doc,
3269
    ]
3270

    
3271

    
3272
def GenericListFields(resource, fields, separator, header, cl=None):
3273
  """Generic implementation for listing fields for a resource.
3274

3275
  @param resource: One of L{constants.QR_VIA_LUXI}
3276
  @type fields: list of strings
3277
  @param fields: List of fields to query for
3278
  @type separator: string or None
3279
  @param separator: String used to separate fields
3280
  @type header: bool
3281
  @param header: Whether to show header row
3282

3283
  """
3284
  if cl is None:
3285
    cl = GetClient()
3286

    
3287
  if not fields:
3288
    fields = None
3289

    
3290
  response = cl.QueryFields(resource, fields)
3291

    
3292
  found_unknown = _WarnUnknownFields(response.fields)
3293

    
3294
  columns = [
3295
    TableColumn("Name", str, False),
3296
    TableColumn("Type", str, False),
3297
    TableColumn("Title", str, False),
3298
    TableColumn("Description", str, False),
3299
    ]
3300

    
3301
  rows = map(_FieldDescValues, response.fields)
3302

    
3303
  for line in FormatTable(rows, columns, header, separator):
3304
    ToStdout(line)
3305

    
3306
  if found_unknown:
3307
    return constants.EXIT_UNKNOWN_FIELD
3308

    
3309
  return constants.EXIT_SUCCESS
3310

    
3311

    
3312
class TableColumn:
3313
  """Describes a column for L{FormatTable}.
3314

3315
  """
3316
  def __init__(self, title, fn, align_right):
3317
    """Initializes this class.
3318

3319
    @type title: string
3320
    @param title: Column title
3321
    @type fn: callable
3322
    @param fn: Formatting function
3323
    @type align_right: bool
3324
    @param align_right: Whether to align values on the right-hand side
3325

3326
    """
3327
    self.title = title
3328
    self.format = fn
3329
    self.align_right = align_right
3330

    
3331

    
3332
def _GetColFormatString(width, align_right):
3333
  """Returns the format string for a field.
3334

3335
  """
3336
  if align_right:
3337
    sign = ""
3338
  else:
3339
    sign = "-"
3340

    
3341
  return "%%%s%ss" % (sign, width)
3342

    
3343

    
3344
def FormatTable(rows, columns, header, separator):
3345
  """Formats data as a table.
3346

3347
  @type rows: list of lists
3348
  @param rows: Row data, one list per row
3349
  @type columns: list of L{TableColumn}
3350
  @param columns: Column descriptions
3351
  @type header: bool
3352
  @param header: Whether to show header row
3353
  @type separator: string or None
3354
  @param separator: String used to separate columns
3355

3356
  """
3357
  if header:
3358
    data = [[col.title for col in columns]]
3359
    colwidth = [len(col.title) for col in columns]
3360
  else:
3361
    data = []
3362
    colwidth = [0 for _ in columns]
3363

    
3364
  # Format row data
3365
  for row in rows:
3366
    assert len(row) == len(columns)
3367

    
3368
    formatted = [col.format(value) for value, col in zip(row, columns)]
3369

    
3370
    if separator is None:
3371
      # Update column widths
3372
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3373
        # Modifying a list's items while iterating is fine
3374
        colwidth[idx] = max(oldwidth, len(value))
3375

    
3376
    data.append(formatted)
3377

    
3378
  if separator is not None:
3379
    # Return early if a separator is used
3380
    return [separator.join(row) for row in data]
3381

    
3382
  if columns and not columns[-1].align_right:
3383
    # Avoid unnecessary spaces at end of line
3384
    colwidth[-1] = 0
3385

    
3386
  # Build format string
3387
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3388
                  for col, width in zip(columns, colwidth)])
3389

    
3390
  return [fmt % tuple(row) for row in data]
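

# Illustrative sketch only (hypothetical helper, not part of the original
# module): formatting rows with TableColumn, one plain column and one
# unit-formatted, right-aligned column; the disk names and sizes are made up.
def _ExampleFormatTable():
  """Returns the formatted lines for two sample disks.

  """
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Size", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["disk0", 1024], ["disk1", 20480]]
  return FormatTable(rows, columns, True, None)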
3391

    
3392

    
3393
def FormatTimestamp(ts):
3394
  """Formats a given timestamp.
3395

3396
  @type ts: timestamp
3397
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3398

3399
  @rtype: string
3400
  @return: a string with the formatted timestamp
3401

3402
  """
3403
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3404
    return "?"
3405

    
3406
  (sec, usecs) = ts
3407
  return utils.FormatTime(sec, usecs=usecs)
3408

    
3409

    
3410
def ParseTimespec(value):
3411
  """Parse a time specification.
3412

3413
  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks
3420

3421
  Without any suffix, the value will be taken to be in seconds.
3422

3423
  """
3424
  value = str(value)
3425
  if not value:
3426
    raise errors.OpPrereqError("Empty time specification passed",
3427
                               errors.ECODE_INVAL)
3428
  suffix_map = {
3429
    "s": 1,
3430
    "m": 60,
3431
    "h": 3600,
3432
    "d": 86400,
3433
    "w": 604800,
3434
    }
3435
  if value[-1] not in suffix_map:
3436
    try:
3437
      value = int(value)
3438
    except (TypeError, ValueError):
3439
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3440
                                 errors.ECODE_INVAL)
3441
  else:
3442
    multiplier = suffix_map[value[-1]]
3443
    value = value[:-1]
3444
    if not value: # no data left after stripping the suffix
3445
      raise errors.OpPrereqError("Invalid time specification (only"
3446
                                 " suffix passed)", errors.ECODE_INVAL)
3447
    try:
3448
      value = int(value) * multiplier
3449
    except (TypeError, ValueError):
3450
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3451
                                 errors.ECODE_INVAL)
3452
  return value
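

# Illustrative sketch only (hypothetical helper, not part of the original
# module): ParseTimespec returns plain seconds, e.g. "30" -> 30, "2h" -> 7200
# and "1w" -> 604800.
def _ExampleParseTimespec():
  """Returns the parsed values for a few sample specifications.

  """
  return [ParseTimespec(value) for value in ("30", "2h", "1w")]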
3453

    
3454

    
3455
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3456
                   filter_master=False, nodegroup=None):
3457
  """Returns the names of online nodes.
3458

3459
  This function will also log a note on stderr with the names of
  the offline nodes that are skipped.
3461

3462
  @param nodes: if not empty, use only this subset of nodes (minus the
3463
      offline ones)
3464
  @param cl: if not None, luxi client to use
3465
  @type nowarn: boolean
3466
  @param nowarn: by default, this function will output a note with the
3467
      offline nodes that are skipped; if this parameter is True the
3468
      note is not displayed
3469
  @type secondary_ips: boolean
3470
  @param secondary_ips: if True, return the secondary IPs instead of the
3471
      names, useful for doing network traffic over the replication interface
3472
      (if any)
3473
  @type filter_master: boolean
3474
  @param filter_master: if True, do not return the master node in the list
3475
      (useful in coordination with secondary_ips where we cannot check our
3476
      node name against the list)
3477
  @type nodegroup: string
3478
  @param nodegroup: If set, only return nodes in this node group
3479

3480
  """
3481
  if cl is None:
3482
    cl = GetClient()
3483

    
3484
  qfilter = []
3485

    
3486
  if nodes:
3487
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3488

    
3489
  if nodegroup is not None:
3490
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3491
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3492

    
3493
  if filter_master:
3494
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3495

    
3496
  if qfilter:
3497
    if len(qfilter) > 1:
3498
      final_filter = [qlang.OP_AND] + qfilter
3499
    else:
3500
      assert len(qfilter) == 1
3501
      final_filter = qfilter[0]
3502
  else:
3503
    final_filter = None
3504

    
3505
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3506

    
3507
  def _IsOffline(row):
3508
    (_, (_, offline), _) = row
3509
    return offline
3510

    
3511
  def _GetName(row):
3512
    ((_, name), _, _) = row
3513
    return name
3514

    
3515
  def _GetSip(row):
3516
    (_, _, (_, sip)) = row
3517
    return sip
3518

    
3519
  (offline, online) = compat.partition(result.data, _IsOffline)
3520

    
3521
  if offline and not nowarn:
3522
    ToStderr("Note: skipping offline node(s): %s" %
3523
             utils.CommaJoin(map(_GetName, offline)))
3524

    
3525
  if secondary_ips:
3526
    fn = _GetSip
3527
  else:
3528
    fn = _GetName
3529

    
3530
  return map(fn, online)
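

# Illustrative sketch only (hypothetical helper, not part of the original
# module): secondary IPs of all online nodes except the master, as could be
# used for copying data over the replication network.
def _ExampleOnlineSecondaryIps(cl):
  """Returns the secondary IPs of the online, non-master nodes.

  """
  return GetOnlineNodes([], cl=cl, secondary_ips=True, filter_master=True)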
3531

    
3532

    
3533
def _ToStream(stream, txt, *args):
3534
  """Write a message to a stream, bypassing the logging system
3535

3536
  @type stream: file object
3537
  @param stream: the file to which we should write
3538
  @type txt: str
3539
  @param txt: the message
3540

3541
  """
3542
  try:
3543
    if args:
3544
      args = tuple(args)
3545
      stream.write(txt % args)
3546
    else:
3547
      stream.write(txt)
3548
    stream.write("\n")
3549
    stream.flush()
3550
  except IOError, err:
3551
    if err.errno == errno.EPIPE:
3552
      # our terminal went away, we'll exit
3553
      sys.exit(constants.EXIT_FAILURE)
3554
    else:
3555
      raise
3556

    
3557

    
3558
def ToStdout(txt, *args):
3559
  """Write a message to stdout only, bypassing the logging system
3560

3561
  This is just a wrapper over _ToStream.
3562

3563
  @type txt: str
3564
  @param txt: the message
3565

3566
  """
3567
  _ToStream(sys.stdout, txt, *args)
3568

    
3569

    
3570
def ToStderr(txt, *args):
3571
  """Write a message to stderr only, bypassing the logging system
3572

3573
  This is just a wrapper over _ToStream.
3574

3575
  @type txt: str
3576
  @param txt: the message
3577

3578
  """
3579
  _ToStream(sys.stderr, txt, *args)
3580

    
3581

    
3582
class JobExecutor(object):
3583
  """Class which manages the submission and execution of multiple jobs.
3584

3585
  Note that instances of this class should not be reused between
3586
  GetResults() calls.
3587

3588
  """
3589
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3590
    self.queue = []
3591
    if cl is None:
3592
      cl = GetClient()
3593
    self.cl = cl
3594
    self.verbose = verbose
3595
    self.jobs = []
3596
    self.opts = opts
3597
    self.feedback_fn = feedback_fn
3598
    self._counter = itertools.count()
3599

    
3600
  @staticmethod
3601
  def _IfName(name, fmt):
3602
    """Helper function for formatting name.
3603

3604
    """
3605
    if name:
3606
      return fmt % name
3607

    
3608
    return ""
3609

    
3610
  def QueueJob(self, name, *ops):
3611
    """Record a job for later submit.
3612

3613
    @type name: string
3614
    @param name: a description of the job, will be used in WaitJobSet
3615

3616
    """
3617
    SetGenericOpcodeOpts(ops, self.opts)
3618
    self.queue.append((self._counter.next(), name, ops))
3619

    
3620
  def AddJobId(self, name, status, job_id):
3621
    """Adds a job ID to the internal queue.
3622

3623
    """
3624
    self.jobs.append((self._counter.next(), status, job_id, name))
3625

    
3626
  def SubmitPending(self, each=False):
3627
    """Submit all pending jobs.
3628

3629
    """
3630
    if each:
3631
      results = []
3632
      for (_, _, ops) in self.queue:
3633
        # SubmitJob will remove the success status, but raise an exception if
3634
        # the submission fails, so we'll notice that anyway.
3635
        results.append([True, self.cl.SubmitJob(ops)[0]])
3636
    else:
3637
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3638
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3639
      self.jobs.append((idx, status, data, name))
3640

    
3641
  def _ChooseJob(self):
3642
    """Choose a non-waiting/queued job to poll next.
3643

3644
    """
3645
    assert self.jobs, "_ChooseJob called with empty job list"
3646

    
3647
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3648
                               ["status"])
3649
    assert result
3650

    
3651
    for job_data, status in zip(self.jobs, result):
3652
      if (isinstance(status, list) and status and
3653
          status[0] in (constants.JOB_STATUS_QUEUED,
3654
                        constants.JOB_STATUS_WAITING,
3655
                        constants.JOB_STATUS_CANCELING)):
3656
        # job is still present and waiting
3657
        continue
3658
      # good candidate found (either running job or lost job)
3659
      self.jobs.remove(job_data)
3660
      return job_data
3661

    
3662
    # no job found
3663
    return self.jobs.pop(0)
3664

    
3665
  def GetResults(self):
3666
    """Wait for and return the results of all jobs.
3667

3668
    @rtype: list
3669
    @return: list of tuples (success, job results), in the same order
3670
        as the submitted jobs; if a job has failed, instead of the result
3671
        there will be the error message
3672

3673
    """
3674
    if not self.jobs:
3675
      self.SubmitPending()
3676
    results = []
3677
    if self.verbose:
3678
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3679
      if ok_jobs:
3680
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3681

    
3682
    # first, remove any non-submitted jobs
3683
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3684
    for idx, _, jid, name in failures:
3685
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3686
      results.append((idx, False, jid))
3687

    
3688
    while self.jobs:
3689
      (idx, _, jid, name) = self._ChooseJob()
3690
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3691
      try:
3692
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3693
        success = True
3694
      except errors.JobLost, err:
3695
        _, job_result = FormatError(err)
3696
        ToStderr("Job %s%s has been archived, cannot check its result",
3697
                 jid, self._IfName(name, " for %s"))
3698
        success = False
3699
      except (errors.GenericError, luxi.ProtocolError), err:
3700
        _, job_result = FormatError(err)
3701
        success = False
3702
        # the error message will always be shown, verbose or not
3703
        ToStderr("Job %s%s has failed: %s",
3704
                 jid, self._IfName(name, " for %s"), job_result)
3705

    
3706
      results.append((idx, success, job_result))
3707

    
3708
    # sort based on the index, then drop it
3709
    results.sort()
3710
    results = [i[1:] for i in results]
3711

    
3712
    return results
3713

    
3714
  def WaitOrShow(self, wait):
3715
    """Wait for job results or only print the job IDs.
3716

3717
    @type wait: boolean
3718
    @param wait: whether to wait or not
3719

3720
    """
3721
    if wait:
3722
      return self.GetResults()
3723
    else:
3724
      if not self.jobs:
3725
        self.SubmitPending()
3726
      for _, status, result, name in self.jobs:
3727
        if status:
3728
          ToStdout("%s: %s", result, name)
3729
        else:
3730
          ToStderr("Failure for %s: %s", name, result)
3731
      return [row[1:3] for row in self.jobs]
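

# Illustrative sketch only (hypothetical helper, not part of the original
# module): queueing one job per instance and waiting for all of them; the
# instance names are made up and the opcode is only an example of what a
# caller might submit.
def _ExampleStartInstances(opts):
  """Submits a startup job per instance and returns (success, result) pairs.

  """
  jex = JobExecutor(opts=opts)
  for name in ("inst1.example.com", "inst2.example.com"):
    jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
  # Results come back in submission order; failed jobs carry the error
  # message instead of the job result
  return jex.GetResults()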
3732

    
3733

    
3734
def FormatParamsDictInfo(param_dict, actual):
3735
  """Formats a parameter dictionary.
3736

3737
  @type param_dict: dict
3738
  @param param_dict: the own parameters
3739
  @type actual: dict
3740
  @param actual: the current parameter set (including defaults)
3741
  @rtype: dict
3742
  @return: dictionary where the value of each parameter is either a fully
3743
      formatted string or a dictionary containing formatted strings
3744

3745
  """
3746
  ret = {}
3747
  for (key, data) in actual.items():
3748
    if isinstance(data, dict) and data:
3749
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
3750
    else:
3751
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
3752
  return ret
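

# Illustrative sketch only (hypothetical helper, not part of the original
# module): parameters present in the own dictionary keep their value, missing
# ones are rendered as "default (...)".
def _ExampleFormatParams():
  """Returns {"acpi": "True", "pae": "default (False)"} for the sample input.

  """
  return FormatParamsDictInfo({"acpi": True}, {"acpi": True, "pae": False})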
3753

    
3754

    
3755
def _FormatListInfoDefault(data, def_data):
3756
  if data is not None:
3757
    ret = utils.CommaJoin(data)
3758
  else:
3759
    ret = "default (%s)" % utils.CommaJoin(def_data)
3760
  return ret
3761

    
3762

    
3763
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3764
  """Formats an instance policy.
3765

3766
  @type custom_ipolicy: dict
3767
  @param custom_ipolicy: own policy
3768
  @type eff_ipolicy: dict
3769
  @param eff_ipolicy: effective policy (including defaults); ignored for
3770
      cluster
3771
  @type iscluster: bool
3772
  @param iscluster: the policy is at cluster level
3773
  @rtype: list of pairs
3774
  @return: formatted data, suitable for L{PrintGenericInfo}
3775

3776
  """
3777
  if iscluster:
3778
    eff_ipolicy = custom_ipolicy
3779

    
3780
  minmax_out = []
3781
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3782
  if custom_minmax:
3783
    for (k, minmax) in enumerate(custom_minmax):
3784
      minmax_out.append([
3785
        ("%s/%s" % (key, k),
3786
         FormatParamsDictInfo(minmax[key], minmax[key]))
3787
        for key in constants.ISPECS_MINMAX_KEYS
3788
        ])
3789
  else:
3790
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3791
      minmax_out.append([
3792
        ("%s/%s" % (key, k),
3793
         FormatParamsDictInfo({}, minmax[key]))
3794
        for key in constants.ISPECS_MINMAX_KEYS
3795
        ])
3796
  ret = [("bounds specs", minmax_out)]
3797

    
3798
  if iscluster:
3799
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
3800
    ret.append(
3801
      (constants.ISPECS_STD,
3802
       FormatParamsDictInfo(stdspecs, stdspecs))
3803
      )
3804

    
3805
  ret.append(
3806
    ("allowed disk templates",
3807
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3808
                            eff_ipolicy[constants.IPOLICY_DTS]))
3809
    )
3810
  ret.extend([
3811
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3812
    for key in constants.IPOLICY_PARAMETERS
3813
    ])
3814
  return ret
3815

    
3816

    
3817
def _PrintSpecsParameters(buf, specs):
3818
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3819
  buf.write(",".join(values))
3820

    
3821

    
3822
def PrintIPolicyCommand(buf, ipolicy, isgroup):
3823
  """Print the command option used to generate the given instance policy.
3824

3825
  Currently only the parts dealing with specs are supported.
3826

3827
  @type buf: StringIO
3828
  @param buf: stream to write into
3829
  @type ipolicy: dict
3830
  @param ipolicy: instance policy
3831
  @type isgroup: bool
3832
  @param isgroup: whether the policy is at group level
3833

3834
  """
3835
  if not isgroup:
3836
    stdspecs = ipolicy.get("std")
3837
    if stdspecs:
3838
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3839
      _PrintSpecsParameters(buf, stdspecs)
3840
  minmaxes = ipolicy.get("minmax", [])
3841
  first = True
3842
  for minmax in minmaxes:
3843
    minspecs = minmax.get("min")
3844
    maxspecs = minmax.get("max")
3845
    if minspecs and maxspecs:
3846
      if first:
3847
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3848
        first = False
3849
      else:
3850
        buf.write("//")
3851
      buf.write("min:")
3852
      _PrintSpecsParameters(buf, minspecs)
3853
      buf.write("/max:")
3854
      _PrintSpecsParameters(buf, maxspecs)
3855

    
3856

    
3857
def ConfirmOperation(names, list_type, text, extra=""):
3858
  """Ask the user to confirm an operation on a list of list_type.
3859

3860
  This function is used to request confirmation for doing an operation
3861
  on a given list of list_type.
3862

3863
  @type names: list
3864
  @param names: the list of names that we display when
3865
      we ask for confirmation
3866
  @type list_type: str
3867
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3868
  @type text: str
3869
  @param text: the operation that the user should confirm
3870
  @rtype: boolean
3871
  @return: True or False depending on user's confirmation.
3872

3873
  """
3874
  count = len(names)
3875
  msg = ("The %s will operate on %d %s.\n%s"
3876
         "Do you want to continue?" % (text, count, list_type, extra))
3877
  affected = (("\nAffected %s:\n" % list_type) +
3878
              "\n".join(["  %s" % name for name in names]))
3879

    
3880
  choices = [("y", True, "Yes, execute the %s" % text),
3881
             ("n", False, "No, abort the %s" % text)]
3882

    
3883
  if count > 20:
3884
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3885
    question = msg
3886
  else:
3887
    question = msg + affected
3888

    
3889
  choice = AskUser(question, choices)
3890
  if choice == "v":
3891
    choices.pop(1)
3892
    choice = AskUser(msg + affected, choices)
3893
  return choice
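

# Illustrative sketch only (hypothetical helper, not part of the original
# module): asking for confirmation before acting on a list of instances; the
# operation text is a made-up example.
def _ExampleConfirmRemoval(names):
  """Returns True only if the user confirmed the sample operation.

  """
  return ConfirmOperation(names, "instances", "removal of all instances")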
3894

    
3895

    
3896
def _MaybeParseUnit(elements):
3897
  """Parses and returns an array of potential values with units.
3898

3899
  """
3900
  parsed = {}
3901
  for k, v in elements.items():
3902
    if v == constants.VALUE_DEFAULT:
3903
      parsed[k] = v
3904
    else:
3905
      parsed[k] = utils.ParseUnit(v)
3906
  return parsed
3907

    
3908

    
3909
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3910
                             ispecs_disk_count, ispecs_disk_size,
3911
                             ispecs_nic_count, group_ipolicy, fill_all):
3912
  try:
3913
    if ispecs_mem_size:
3914
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3915
    if ispecs_disk_size:
3916
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3917
  except (TypeError, ValueError, errors.UnitParseError), err:
3918
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3919
                               " in policy: %s" %
3920
                               (ispecs_disk_size, ispecs_mem_size, err),
3921
                               errors.ECODE_INVAL)
3922

    
3923
  # prepare ipolicy dict
3924
  ispecs_transposed = {
3925
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3926
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3927
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3928
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3929
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3930
    }
3931

    
3932
  # first, check that the values given are correct
3933
  if group_ipolicy:
3934
    forced_type = TISPECS_GROUP_TYPES
3935
  else:
3936
    forced_type = TISPECS_CLUSTER_TYPES
3937
  for specs in ispecs_transposed.values():
3938
    assert type(specs) is dict
3939
    utils.ForceDictType(specs, forced_type)
3940

    
3941
  # then transpose
3942
  ispecs = {
3943
    constants.ISPECS_MIN: {},
3944
    constants.ISPECS_MAX: {},
3945
    constants.ISPECS_STD: {},
3946
    }
3947
  for (name, specs) in ispecs_transposed.iteritems():
3948
    assert name in constants.ISPECS_PARAMETERS
3949
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3950
      assert key in ispecs
3951
      ispecs[key][name] = val
3952
  minmax_out = {}
3953
  for key in constants.ISPECS_MINMAX_KEYS:
3954
    if fill_all:
3955
      minmax_out[key] = \
3956
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3957
    else:
3958
      minmax_out[key] = ispecs[key]
3959
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3960
  if fill_all:
3961
    ipolicy[constants.ISPECS_STD] = \
3962
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3963
                         ispecs[constants.ISPECS_STD])
3964
  else:
3965
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3966

    
3967

    
3968
def _ParseSpecUnit(spec, keyname):
3969
  ret = spec.copy()
3970
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3971
    if k in ret:
3972
      try:
3973
        ret[k] = utils.ParseUnit(ret[k])
3974
      except (TypeError, ValueError, errors.UnitParseError), err:
3975
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3976
                                    " specs: %s" % (k, ret[k], keyname, err)),
3977
                                   errors.ECODE_INVAL)
3978
  return ret
3979

    
3980

    
3981
def _ParseISpec(spec, keyname, required):
3982
  ret = _ParseSpecUnit(spec, keyname)
3983
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
3984
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
3985
  if required and missing:
3986
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
3987
                               (keyname, utils.CommaJoin(missing)),
3988
                               errors.ECODE_INVAL)
3989
  return ret
3990

    
3991

    
3992
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
3993
  ret = None
3994
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
3995
      len(minmax_ispecs[0]) == 1):
3996
    for (key, spec) in minmax_ispecs[0].items():
3997
      # This loop is executed exactly once
3998
      if key in allowed_values and not spec:
3999
        ret = key
4000
  return ret
4001

    
4002

    
4003
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4004
                            group_ipolicy, allowed_values):
4005
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
4006
  if found_allowed is not None:
4007
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
4008
  elif minmax_ispecs is not None:
4009
    minmax_out = []
4010
    for mmpair in minmax_ispecs:
4011
      mmpair_out = {}
4012
      for (key, spec) in mmpair.items():
4013
        if key not in constants.ISPECS_MINMAX_KEYS:
4014
          msg = "Invalid key in bounds instance specifications: %s" % key
4015
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
4016
        mmpair_out[key] = _ParseISpec(spec, key, True)
4017
      minmax_out.append(mmpair_out)
4018
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4019
  if std_ispecs is not None:
4020
    assert not group_ipolicy # This is not an option for gnt-group
4021
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4022

    
4023

    
4024
def CreateIPolicyFromOpts(ispecs_mem_size=None,
4025
                          ispecs_cpu_count=None,
4026
                          ispecs_disk_count=None,
4027
                          ispecs_disk_size=None,
4028
                          ispecs_nic_count=None,
4029
                          minmax_ispecs=None,
4030
                          std_ispecs=None,
4031
                          ipolicy_disk_templates=None,
4032
                          ipolicy_vcpu_ratio=None,
4033
                          ipolicy_spindle_ratio=None,
4034
                          group_ipolicy=False,
4035
                          allowed_values=None,
4036
                          fill_all=False):
4037
  """Creation of instance policy based on command line options.
4038

4039
  @param fill_all: whether for cluster policies we should ensure that
4040
    all values are filled
4041

4042
  """
4043
  assert not (fill_all and allowed_values)
4044

    
4045
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4046
                 ispecs_disk_size or ispecs_nic_count)
4047
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4048
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4049
                               " together with any --ipolicy-xxx-specs option",
4050
                               errors.ECODE_INVAL)
4051

    
4052
  ipolicy_out = objects.MakeEmptyIPolicy()
4053
  if split_specs:
4054
    assert fill_all
4055
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4056
                             ispecs_disk_count, ispecs_disk_size,
4057
                             ispecs_nic_count, group_ipolicy, fill_all)
4058
  elif (minmax_ispecs is not None or std_ispecs is not None):
4059
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4060
                            group_ipolicy, allowed_values)
4061

    
4062
  if ipolicy_disk_templates is not None:
4063
    if allowed_values and ipolicy_disk_templates in allowed_values:
4064
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4065
    else:
4066
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4067
  if ipolicy_vcpu_ratio is not None:
4068
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4069
  if ipolicy_spindle_ratio is not None:
4070
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4071

    
4072
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4073

    
4074
  if not group_ipolicy and fill_all:
4075
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4076

    
4077
  return ipolicy_out
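

# Illustrative sketch only (hypothetical helper, not part of the original
# module): a group-level policy overriding only the vCPU ratio; everything
# else stays unset and is therefore inherited from the cluster.
def _ExampleGroupIPolicy():
  """Returns a partial instance policy for a node group.

  """
  return CreateIPolicyFromOpts(ipolicy_vcpu_ratio=4.0, group_ipolicy=True)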
4078

    
4079

    
4080
def _SerializeGenericInfo(buf, data, level, afterkey=False):
4081
  """Formatting core of L{PrintGenericInfo}.
4082

4083
  @param buf: (string) stream to accumulate the result into
4084
  @param data: data to format
4085
  @type level: int
4086
  @param level: depth in the data hierarchy, used for indenting
4087
  @type afterkey: bool
4088
  @param afterkey: True when we are in the middle of a line after a key (used
4089
      to properly add newlines or indentation)
4090

4091
  """
4092
  baseind = "  "
4093
  if isinstance(data, dict):
4094
    if not data:
4095
      buf.write("\n")
4096
    else:
4097
      if afterkey:
4098
        buf.write("\n")
4099
        doindent = True
4100
      else:
4101
        doindent = False
4102
      for key in sorted(data):
4103
        if doindent:
4104
          buf.write(baseind * level)
4105
        else:
4106
          doindent = True
4107
        buf.write(key)
4108
        buf.write(": ")
4109
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4110
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4111
    # list of tuples (an ordered dictionary)
4112
    if afterkey:
4113
      buf.write("\n")
4114
      doindent = True
4115
    else:
4116
      doindent = False
4117
    for (key, val) in data:
4118
      if doindent:
4119
        buf.write(baseind * level)
4120
      else:
4121
        doindent = True
4122
      buf.write(key)
4123
      buf.write(": ")
4124
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4125
  elif isinstance(data, list):
4126
    if not data:
4127
      buf.write("\n")
4128
    else:
4129
      if afterkey:
4130
        buf.write("\n")
4131
        doindent = True
4132
      else:
4133
        doindent = False
4134
      for item in data:
4135
        if doindent:
4136
          buf.write(baseind * level)
4137
        else:
4138
          doindent = True
4139
        buf.write("-")
4140
        buf.write(baseind[1:])
4141
        _SerializeGenericInfo(buf, item, level + 1)
4142
  else:
4143
    # This branch should be only taken for strings, but it's practically
4144
    # impossible to guarantee that no other types are produced somewhere
4145
    buf.write(str(data))
4146
    buf.write("\n")
4147

    
4148

    
4149
def PrintGenericInfo(data):
4150
  """Print information formatted according to the hierarchy.
4151

4152
  The output is a valid YAML string.
4153

4154
  @param data: the data to print. It's a hierarchical structure whose elements
4155
      can be:
4156
        - dictionaries, where keys are strings and values are of any of the
4157
          types listed here
4158
        - lists of pairs (key, value), where key is a string and value is of
4159
          any of the types listed here; it's a way to encode ordered
4160
          dictionaries
4161
        - lists of any of the types listed here
4162
        - strings
4163

4164
  """
4165
  buf = StringIO()
4166
  _SerializeGenericInfo(buf, data, 0)
4167
  ToStdout(buf.getvalue().rstrip("\n"))
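

# Illustrative sketch only (hypothetical helper, not part of the original
# module): the kinds of nested structures PrintGenericInfo accepts, namely
# ordered key/value pairs, plain lists and dictionaries of strings.
def _ExamplePrintGenericInfo():
  """Prints a small, made-up hierarchy as YAML-like text.

  """
  PrintGenericInfo([
    ("cluster", "cluster.example.com"),
    ("nodes", ["node1.example.com", "node2.example.com"]),
    ("flags", {"drained": "False", "offline": "False"}),
    ])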