#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
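
# Illustrative sketch only (the command-table format of the gnt-* scripts is
# assumed here, it is not defined in this module): a command accepting any
# number of instance names would declare ARGS_MANY_INSTANCES, while one that
# requires exactly one node would declare ARGS_ONE_NODE, e.g.
#
#   commands = {
#     "info": (ShowInfo, ARGS_MANY_INSTANCES, [], "...", "Show information"),
#     }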


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)
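
# Illustrative usage (values assumed): the helpers above only need 'opts' to
# carry 'tag_type' (and 'tags_source' for Add/Remove).  For example:
#
#   opts.tag_type = constants.TAG_CLUSTER
#   ListTags(opts, [])                      # prints each cluster tag
#   AddTags(opts, ["web", "production"])    # adds two cluster tags
#
# For node/instance/network/group tags the first positional argument names
# the object, e.g. RemoveTags(opts, ["node1.example.com", "blue"]).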


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
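
# Example (sizes assumed to be normalized to MiB by utils.ParseUnit): options
# declared with type="unit" accept plain numbers as well as suffixed values,
# e.g. check_unit(None, "--os-size", "4g") would return 4096, while an
# unparseable value raises OptionValueError.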


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
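
# Example of the resulting dict (derived from the code above; the parameter
# names are illustrative):
#
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-serial_console", True)
#     -> {"memory": "512", "auto_balance": False, "serial_console": None}
#
# With parse_prefixes=False every element must carry an explicit "=value",
# otherwise errors.ParameterError is raised.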


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
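
# Example of the resulting tuples (derived from the code above; the
# hypervisor and parameter names are illustrative):
#
#   _SplitIdentKeyVal("-H", "kvm:kernel_path=/vmlinuz,no_acpi", True)
#     -> ("kvm", {"kernel_path": "/vmlinuz", "acpi": False})
#   _SplitIdentKeyVal("-H", "no_kvm", True)
#     -> ("kvm", False)
#
# A leading "-" on the ident is only treated as removal when it is not
# followed by a digit, so identifiers like "-1" (e.g. disk indices) still
# parse as ordinary idents.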


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  """Parse a "/"-separated list of ident:key=val groups into a dict.

  """
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
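
# Example of the nested structure (derived from the code above; the key names
# are illustrative), as used by "multilistidentkeyval" options such as
# --ipolicy-bounds-specs:
#
#   check_multilist_ident_key_val(None, "--ipolicy-bounds-specs",
#                                 "min:memory-size=128/max:memory-size=1024")
#     -> [{"min": {"memory-size": "128"}, "max": {"memory-size": "1024"}}]
#
# "//" separates several such groups, each becoming one entry of the list.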


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
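
# Example (derived from the code above): the value is lowercased first, so
#
#   check_bool(None, "--offline", "Yes")   -> True
#   check_bool(None, "--offline", "no")    -> False
#
# and anything that is neither yes/true nor no/false raises
# errors.ParameterError.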


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggest is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible, fall back"
                                     " to failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

1400
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1401
                                       dest="cluster_domain_secret",
1402
                                       default=None,
1403
                                       help=("Load new new cluster domain"
1404
                                             " secret from file"))
1405

    
1406
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1407
                                           dest="new_cluster_domain_secret",
1408
                                           default=False, action="store_true",
1409
                                           help=("Create a new cluster domain"
1410
                                                 " secret"))
1411

    
1412
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1413
                              dest="use_replication_network",
1414
                              help="Whether to use the replication network"
1415
                              " for talking to the nodes",
1416
                              action="store_true", default=False)
1417

    
1418
MAINTAIN_NODE_HEALTH_OPT = \
1419
    cli_option("--maintain-node-health", dest="maintain_node_health",
1420
               metavar=_YORNO, default=None, type="bool",
1421
               help="Configure the cluster to automatically maintain node"
1422
               " health, by shutting down unknown instances, shutting down"
1423
               " unknown DRBD devices, etc.")
1424

    
1425
IDENTIFY_DEFAULTS_OPT = \
1426
    cli_option("--identify-defaults", dest="identify_defaults",
1427
               default=False, action="store_true",
1428
               help="Identify which saved instance parameters are equal to"
1429
               " the current cluster defaults and set them as such, instead"
1430
               " of marking them as overridden")
1431

    
1432
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1433
                         action="store", dest="uid_pool",
1434
                         help=("A list of user-ids or user-id"
1435
                               " ranges separated by commas"))
1436

    
1437
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1438
                          action="store", dest="add_uids",
1439
                          help=("A list of user-ids or user-id"
1440
                                " ranges separated by commas, to be"
1441
                                " added to the user-id pool"))
1442

    
1443
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1444
                             action="store", dest="remove_uids",
1445
                             help=("A list of user-ids or user-id"
1446
                                   " ranges separated by commas, to be"
1447
                                   " removed from the user-id pool"))
1448

    
1449
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1450
                              action="store", dest="reserved_lvs",
1451
                              help=("A comma-separated list of reserved"
1452
                                    " logical volumes names, that will be"
1453
                                    " ignored by cluster verify"))
1454

    
1455
ROMAN_OPT = cli_option("--roman",
1456
                       dest="roman_integers", default=False,
1457
                       action="store_true",
1458
                       help="Use roman numbers for positive integers")
1459

    
1460
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1461
                             action="store", default=None,
1462
                             help="Specifies usermode helper for DRBD")
1463

    
1464
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1465
                                action="store_false", default=True,
1466
                                help="Disable support for DRBD")
1467

    
1468
PRIMARY_IP_VERSION_OPT = \
1469
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1470
               action="store", dest="primary_ip_version",
1471
               metavar="%d|%d" % (constants.IP4_VERSION,
1472
                                  constants.IP6_VERSION),
1473
               help="Cluster-wide IP version for primary IP")
1474

    
1475
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1476
                              action="store_true",
1477
                              help="Show machine name for every line in output")
1478

    
1479
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1480
                              action="store_true",
1481
                              help=("Hide successful results and show failures"
1482
                                    " only (determined by the exit code)"))
1483

    
1484
REASON_OPT = cli_option("--reason", default=None,
1485
                        help="The reason for executing the command")
1486

    
1487

    
1488
def _PriorityOptionCb(option, _, value, parser):
1489
  """Callback for processing C{--priority} option.
1490

1491
  """
1492
  value = _PRIONAME_TO_VALUE[value]
1493

    
1494
  setattr(parser.values, option.dest, value)
1495

    
1496

    
1497
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1498
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1499
                          choices=_PRIONAME_TO_VALUE.keys(),
1500
                          action="callback", type="choice",
1501
                          callback=_PriorityOptionCb,
1502
                          help="Priority for opcode processing")
1503

    
1504
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1505
                        type="bool", default=None, metavar=_YORNO,
1506
                        help="Sets the hidden flag on the OS")
1507

    
1508
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1509
                        type="bool", default=None, metavar=_YORNO,
1510
                        help="Sets the blacklisted flag on the OS")
1511

    
1512
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1513
                                     type="bool", metavar=_YORNO,
1514
                                     dest="prealloc_wipe_disks",
1515
                                     help=("Wipe disks prior to instance"
1516
                                           " creation"))
1517

    
1518
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1519
                             type="keyval", default=None,
1520
                             help="Node parameters")
1521

    
1522
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1523
                              action="store", metavar="POLICY", default=None,
1524
                              help="Allocation policy for the node group")
1525

    
1526
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1527
                              type="bool", metavar=_YORNO,
1528
                              dest="node_powered",
1529
                              help="Specify if the SoR for node is powered")
1530

    
1531
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1532
                             default=constants.OOB_TIMEOUT,
1533
                             help="Maximum time to wait for out-of-band helper")
1534

    
1535
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1536
                             default=constants.OOB_POWER_DELAY,
1537
                             help="Time in seconds to wait between power-ons")
1538

    
1539
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1540
                              action="store_true", default=False,
1541
                              help=("Whether command argument should be treated"
1542
                                    " as filter"))
1543

    
1544
NO_REMEMBER_OPT = cli_option("--no-remember",
1545
                             dest="no_remember",
1546
                             action="store_true", default=False,
1547
                             help="Perform but do not record the change"
1548
                             " in the configuration")
1549

    
1550
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1551
                              default=False, action="store_true",
1552
                              help="Evacuate primary instances only")
1553

    
1554
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1555
                                default=False, action="store_true",
1556
                                help="Evacuate secondary instances only"
1557
                                     " (applies only to internally mirrored"
1558
                                     " disk templates, e.g. %s)" %
1559
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1560

    
1561
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1562
                                action="store_true", default=False,
1563
                                help="Pause instance at startup")
1564

    
1565
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1566
                          help="Destination node group (name or uuid)",
1567
                          default=None, action="append",
1568
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1569

    
1570
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1571
                               action="append", dest="ignore_errors",
1572
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1573
                               help="Error code to be ignored")
1574

    
1575
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1576
                            action="append",
1577
                            help=("Specify disk state information in the"
1578
                                  " format"
1579
                                  " storage_type/identifier:option=value,...;"
1580
                                  " note this is unused for now"),
1581
                            type="identkeyval")
1582

    
1583
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1584
                          action="append",
1585
                          help=("Specify hypervisor state information in the"
1586
                                " format hypervisor:option=value,...;"
1587
                                " note this is unused for now"),
1588
                          type="identkeyval")
1589

    
1590
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1591
                                action="store_true", default=False,
1592
                                help="Ignore instance policy violations")
1593

    
1594
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1595
                             help="Sets the instance's runtime memory,"
1596
                             " ballooning it up or down to the new value",
1597
                             default=None, type="unit", metavar="<size>")
1598

    
1599
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1600
                          action="store_true", default=False,
1601
                          help="Marks the grow as absolute instead of the"
1602
                          " (default) relative mode")
1603

    
1604
NETWORK_OPT = cli_option("--network",
1605
                         action="store", default=None, dest="network",
1606
                         help="IP network in CIDR notation")
1607

    
1608
GATEWAY_OPT = cli_option("--gateway",
1609
                         action="store", default=None, dest="gateway",
1610
                         help="IP address of the router (gateway)")
1611

    
1612
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1613
                                  action="store", default=None,
1614
                                  dest="add_reserved_ips",
1615
                                  help="Comma-separated list of"
1616
                                  " reserved IPs to add")
1617

    
1618
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1619
                                     action="store", default=None,
1620
                                     dest="remove_reserved_ips",
1621
                                     help="Comma-delimited list of"
1622
                                     " reserved IPs to remove")
1623

    
1624
NETWORK6_OPT = cli_option("--network6",
1625
                          action="store", default=None, dest="network6",
1626
                          help="IP network in CIDR notation")
1627

    
1628
GATEWAY6_OPT = cli_option("--gateway6",
1629
                          action="store", default=None, dest="gateway6",
1630
                          help="IP6 address of the router (gateway)")
1631

    
1632
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1633
                                  dest="conflicts_check",
1634
                                  default=True,
1635
                                  action="store_false",
1636
                                  help="Don't check for conflicting IPs")
1637

    
1638
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1639
                                 default=False, action="store_true",
1640
                                 help="Include default values")
1641

    
1642
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1643
                         action="store_true", default=False,
1644
                         help="Try to hotplug device")
1645

    
1646
#: Options provided by all commands
1647
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1648

    
1649
# options related to asynchronous job handling
1650

    
1651
SUBMIT_OPTS = [
1652
  SUBMIT_OPT,
1653
  PRINT_JOBID_OPT,
1654
  ]
1655

    
1656
# common options for creating instances. add and import then add their own
1657
# specific ones.
1658
COMMON_CREATE_OPTS = [
1659
  BACKEND_OPT,
1660
  DISK_OPT,
1661
  DISK_TEMPLATE_OPT,
1662
  FILESTORE_DIR_OPT,
1663
  FILESTORE_DRIVER_OPT,
1664
  HYPERVISOR_OPT,
1665
  IALLOCATOR_OPT,
1666
  NET_OPT,
1667
  NODE_PLACEMENT_OPT,
1668
  NOIPCHECK_OPT,
1669
  NOCONFLICTSCHECK_OPT,
1670
  NONAMECHECK_OPT,
1671
  NONICS_OPT,
1672
  NWSYNC_OPT,
1673
  OSPARAMS_OPT,
1674
  OS_SIZE_OPT,
1675
  SUBMIT_OPT,
1676
  PRINT_JOBID_OPT,
1677
  TAG_ADD_OPT,
1678
  DRY_RUN_OPT,
1679
  PRIORITY_OPT,
1680
  ]
1681

    
1682
# common instance policy options
1683
INSTANCE_POLICY_OPTS = [
1684
  IPOLICY_BOUNDS_SPECS_OPT,
1685
  IPOLICY_DISK_TEMPLATES,
1686
  IPOLICY_VCPU_RATIO,
1687
  IPOLICY_SPINDLE_RATIO,
1688
  ]
1689

    
1690
# instance policy split specs options
1691
SPLIT_ISPECS_OPTS = [
1692
  SPECS_CPU_COUNT_OPT,
1693
  SPECS_DISK_COUNT_OPT,
1694
  SPECS_DISK_SIZE_OPT,
1695
  SPECS_MEM_SIZE_OPT,
1696
  SPECS_NIC_COUNT_OPT,
1697
  ]
1698

    
1699

    
1700
class _ShowUsage(Exception):
1701
  """Exception class for L{_ParseArgs}.
1702

1703
  """
1704
  def __init__(self, exit_error):
1705
    """Initializes instances of this class.
1706

1707
    @type exit_error: bool
1708
    @param exit_error: Whether to report failure on exit
1709

1710
    """
1711
    Exception.__init__(self)
1712
    self.exit_error = exit_error
1713

    
1714

    
1715
class _ShowVersion(Exception):
1716
  """Exception class for L{_ParseArgs}.
1717

1718
  """
1719

    
1720

    
1721
def _ParseArgs(binary, argv, commands, aliases, env_override):
1722
  """Parser for the command line arguments.
1723

1724
  This function parses the arguments and returns the function which
1725
  must be executed together with its (modified) arguments.
1726

1727
  @param binary: Script name
1728
  @param argv: Command line arguments
1729
  @param commands: Dictionary containing command definitions
1730
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1731
  @param env_override: list of env variables allowed for default args
1732
  @raise _ShowUsage: If usage description should be shown
1733
  @raise _ShowVersion: If version should be shown
1734

1735
  """
1736
  assert not (env_override - set(commands))
1737
  assert not (set(aliases.keys()) & set(commands.keys()))
1738

    
1739
  if len(argv) > 1:
1740
    cmd = argv[1]
1741
  else:
1742
    # No option or command given
1743
    raise _ShowUsage(exit_error=True)
1744

    
1745
  if cmd == "--version":
1746
    raise _ShowVersion()
1747
  elif cmd == "--help":
1748
    raise _ShowUsage(exit_error=False)
1749
  elif not (cmd in commands or cmd in aliases):
1750
    raise _ShowUsage(exit_error=True)
1751

    
1752
  # get command, unalias it, and look it up in commands
1753
  if cmd in aliases:
1754
    if aliases[cmd] not in commands:
1755
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1756
                                   " command '%s'" % (cmd, aliases[cmd]))
1757

    
1758
    cmd = aliases[cmd]
1759

    
1760
  if cmd in env_override:
1761
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1762
    env_args = os.environ.get(args_env_name)
1763
    if env_args:
1764
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1765

    
1766
  func, args_def, parser_opts, usage, description = commands[cmd]
1767
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1768
                        description=description,
1769
                        formatter=TitledHelpFormatter(),
1770
                        usage="%%prog %s %s" % (cmd, usage))
1771
  parser.disable_interspersed_args()
1772
  options, args = parser.parse_args(args=argv[2:])
1773

    
1774
  if not _CheckArguments(cmd, args_def, args):
1775
    return None, None, None
1776

    
1777
  return func, options, args
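
# Illustrative example of the env_override mechanism above (binary and command
# names are hypothetical): for a script called "gnt-instance" with "list" in
# env_override, setting GNT_INSTANCE_LIST="-o name,status" in the environment
# makes those arguments the defaults of "gnt-instance list"; they are split
# with shlex and inserted right after the command name.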
1778

    
1779

    
1780
def _FormatUsage(binary, commands):
1781
  """Generates a nice description of all commands.
1782

1783
  @param binary: Script name
1784
  @param commands: Dictionary containing command definitions
1785

1786
  """
1787
  # compute the max line length for cmd + usage
1788
  mlen = min(60, max(map(len, commands)))
1789

    
1790
  yield "Usage: %s {command} [options...] [argument...]" % binary
1791
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1792
  yield ""
1793
  yield "Commands:"
1794

    
1795
  # and format a nice command list
1796
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1797
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1798
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1799
    for line in help_lines:
1800
      yield " %-*s   %s" % (mlen, "", line)
1801

    
1802
  yield ""
1803

    
1804

    
1805
def _CheckArguments(cmd, args_def, args):
1806
  """Verifies the arguments using the argument definition.
1807

1808
  Algorithm:
1809

1810
    1. Abort with error if values specified by user but none expected.
1811

1812
    1. For each argument in definition
1813

1814
      1. Keep running count of minimum number of values (min_count)
1815
      1. Keep running count of maximum number of values (max_count)
1816
      1. If it has an unlimited number of values
1817

1818
        1. Abort with error if it's not the last argument in the definition
1819

1820
    1. If last argument has limited number of values
1821

1822
      1. Abort with error if number of values doesn't match or is too large
1823

1824
    1. Abort with error if user didn't pass enough values (min_count)
1825

1826
  """
1827
  if args and not args_def:
1828
    ToStderr("Error: Command %s expects no arguments", cmd)
1829
    return False
1830

    
1831
  min_count = None
1832
  max_count = None
1833
  check_max = None
1834

    
1835
  last_idx = len(args_def) - 1
1836

    
1837
  for idx, arg in enumerate(args_def):
1838
    if min_count is None:
1839
      min_count = arg.min
1840
    elif arg.min is not None:
1841
      min_count += arg.min
1842

    
1843
    if max_count is None:
1844
      max_count = arg.max
1845
    elif arg.max is not None:
1846
      max_count += arg.max
1847

    
1848
    if idx == last_idx:
1849
      check_max = (arg.max is not None)
1850

    
1851
    elif arg.max is None:
1852
      raise errors.ProgrammerError("Only the last argument can have max=None")
1853

    
1854
  if check_max:
1855
    # Command with exact number of arguments
1856
    if (min_count is not None and max_count is not None and
1857
        min_count == max_count and len(args) != min_count):
1858
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1859
      return False
1860

    
1861
    # Command with limited number of arguments
1862
    if max_count is not None and len(args) > max_count:
1863
      ToStderr("Error: Command %s expects only %d argument(s)",
1864
               cmd, max_count)
1865
      return False
1866

    
1867
  # Command with some required arguments
1868
  if min_count is not None and len(args) < min_count:
1869
    ToStderr("Error: Command %s expects at least %d argument(s)",
1870
             cmd, min_count)
1871
    return False
1872

    
1873
  return True
1874

    
1875

    
1876
def SplitNodeOption(value):
1877
  """Splits the value of a --node option.
1878

1879
  """
1880
  if value and ":" in value:
1881
    return value.split(":", 1)
1882
  else:
1883
    return (value, None)
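
# Example usage (illustrative only; node names are hypothetical):
#
#   (pnode, snode) = SplitNodeOption("node1.example.com:node2.example.com")
#   # pnode == "node1.example.com", snode == "node2.example.com"
#   (pnode, snode) = SplitNodeOption("node1.example.com")
#   # pnode == "node1.example.com", snode is None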
1884

    
1885

    
1886
def CalculateOSNames(os_name, os_variants):
1887
  """Calculates all the names an OS can be called, according to its variants.
1888

1889
  @type os_name: string
1890
  @param os_name: base name of the os
1891
  @type os_variants: list or None
1892
  @param os_variants: list of supported variants
1893
  @rtype: list
1894
  @return: list of valid names
1895

1896
  """
1897
  if os_variants:
1898
    return ["%s+%s" % (os_name, v) for v in os_variants]
1899
  else:
1900
    return [os_name]
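
# Example usage (illustrative only; OS name and variants are hypothetical):
#
#   CalculateOSNames("debootstrap", ["default", "minimal"])
#   # returns ["debootstrap+default", "debootstrap+minimal"]
#   CalculateOSNames("debootstrap", None)
#   # returns ["debootstrap"]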
1901

    
1902

    
1903
def ParseFields(selected, default):
1904
  """Parses the values of "--field"-like options.
1905

1906
  @type selected: string or None
1907
  @param selected: User-selected options
1908
  @type default: list
1909
  @param default: Default fields
1910

1911
  """
1912
  if selected is None:
1913
    return default
1914

    
1915
  if selected.startswith("+"):
1916
    return default + selected[1:].split(",")
1917

    
1918
  return selected.split(",")
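
# Example usage (illustrative only; field names are hypothetical):
#
#   ParseFields(None, ["name", "status"])         # ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"])  # ["name", "status", "oper_ram"]
#   ParseFields("name,os", ["name", "status"])    # ["name", "os"]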
1919

    
1920

    
1921
UsesRPC = rpc.RunWithRPC
1922

    
1923

    
1924
def AskUser(text, choices=None):
1925
  """Ask the user a question.
1926

1927
  @param text: the question to ask
1928

1929
  @param choices: list of tuples (input_char, return_value,
1930
      description); if not given, it will default to: [('y', True,
1931
      'Perform the operation'), ('n', False, 'Do not do the operation')];
1932
      note that the '?' char is reserved for help
1933

1934
  @return: one of the return values from the choices list; if input is
1935
      not possible (i.e. not running with a tty), we return the last
1936
      entry from the list
1937

1938
  """
1939
  if choices is None:
1940
    choices = [("y", True, "Perform the operation"),
1941
               ("n", False, "Do not perform the operation")]
1942
  if not choices or not isinstance(choices, list):
1943
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1944
  for entry in choices:
1945
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1946
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1947

    
1948
  answer = choices[-1][1]
1949
  new_text = []
1950
  for line in text.splitlines():
1951
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1952
  text = "\n".join(new_text)
1953
  try:
1954
    f = file("/dev/tty", "a+")
1955
  except IOError:
1956
    return answer
1957
  try:
1958
    chars = [entry[0] for entry in choices]
1959
    chars[-1] = "[%s]" % chars[-1]
1960
    chars.append("?")
1961
    maps = dict([(entry[0], entry[1]) for entry in choices])
1962
    while True:
1963
      f.write(text)
1964
      f.write("\n")
1965
      f.write("/".join(chars))
1966
      f.write(": ")
1967
      line = f.readline(2).strip().lower()
1968
      if line in maps:
1969
        answer = maps[line]
1970
        break
1971
      elif line == "?":
1972
        for entry in choices:
1973
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1974
        f.write("\n")
1975
        continue
1976
  finally:
1977
    f.close()
1978
  return answer
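
# Example usage (illustrative sketch; question and extra choice are
# hypothetical):
#
#   choices = [("y", True, "Perform the operation"),
#              ("n", False, "Do not perform the operation"),
#              ("a", "all", "Perform the operation on all items")]
#   answer = AskUser("Continue with the operation?", choices)
#   # Without a tty the last entry's value ("all" here) is returned.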
1979

    
1980

    
1981
class JobSubmittedException(Exception):
1982
  """Job was submitted, client should exit.
1983

1984
  This exception has one argument, the ID of the job that was
1985
  submitted. The handler should print this ID.
1986

1987
  This is not an error, just a structured way to exit from clients.
1988

1989
  """
1990

    
1991

    
1992
def SendJob(ops, cl=None):
1993
  """Function to submit an opcode without waiting for the results.
1994

1995
  @type ops: list
1996
  @param ops: list of opcodes
1997
  @type cl: luxi.Client
1998
  @param cl: the luxi client to use for communicating with the master;
1999
             if None, a new client will be created
2000

2001
  """
2002
  if cl is None:
2003
    cl = GetClient()
2004

    
2005
  job_id = cl.SubmitJob(ops)
2006

    
2007
  return job_id
2008

    
2009

    
2010
def GenericPollJob(job_id, cbs, report_cbs):
2011
  """Generic job-polling function.
2012

2013
  @type job_id: number
2014
  @param job_id: Job ID
2015
  @type cbs: Instance of L{JobPollCbBase}
2016
  @param cbs: Data callbacks
2017
  @type report_cbs: Instance of L{JobPollReportCbBase}
2018
  @param report_cbs: Reporting callbacks
2019

2020
  """
2021
  prev_job_info = None
2022
  prev_logmsg_serial = None
2023

    
2024
  status = None
2025

    
2026
  while True:
2027
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2028
                                      prev_logmsg_serial)
2029
    if not result:
2030
      # job not found, go away!
2031
      raise errors.JobLost("Job with id %s lost" % job_id)
2032

    
2033
    if result == constants.JOB_NOTCHANGED:
2034
      report_cbs.ReportNotChanged(job_id, status)
2035

    
2036
      # Wait again
2037
      continue
2038

    
2039
    # Split result, a tuple of (field values, log entries)
2040
    (job_info, log_entries) = result
2041
    (status, ) = job_info
2042

    
2043
    if log_entries:
2044
      for log_entry in log_entries:
2045
        (serial, timestamp, log_type, message) = log_entry
2046
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2047
                                    log_type, message)
2048
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2049

    
2050
    # TODO: Handle canceled and archived jobs
2051
    elif status in (constants.JOB_STATUS_SUCCESS,
2052
                    constants.JOB_STATUS_ERROR,
2053
                    constants.JOB_STATUS_CANCELING,
2054
                    constants.JOB_STATUS_CANCELED):
2055
      break
2056

    
2057
    prev_job_info = job_info
2058

    
2059
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2060
  if not jobs:
2061
    raise errors.JobLost("Job with id %s lost" % job_id)
2062

    
2063
  status, opstatus, result = jobs[0]
2064

    
2065
  if status == constants.JOB_STATUS_SUCCESS:
2066
    return result
2067

    
2068
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2069
    raise errors.OpExecError("Job was canceled")
2070

    
2071
  has_ok = False
2072
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2073
    if status == constants.OP_STATUS_SUCCESS:
2074
      has_ok = True
2075
    elif status == constants.OP_STATUS_ERROR:
2076
      errors.MaybeRaise(msg)
2077

    
2078
      if has_ok:
2079
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2080
                                 (idx, msg))
2081

    
2082
      raise errors.OpExecError(str(msg))
2083

    
2084
  # default failure mode
2085
  raise errors.OpExecError(result)
2086

    
2087

    
2088
class JobPollCbBase:
2089
  """Base class for L{GenericPollJob} callbacks.
2090

2091
  """
2092
  def __init__(self):
2093
    """Initializes this class.
2094

2095
    """
2096

    
2097
  def WaitForJobChangeOnce(self, job_id, fields,
2098
                           prev_job_info, prev_log_serial):
2099
    """Waits for changes on a job.
2100

2101
    """
2102
    raise NotImplementedError()
2103

    
2104
  def QueryJobs(self, job_ids, fields):
2105
    """Returns the selected fields for the selected job IDs.
2106

2107
    @type job_ids: list of numbers
2108
    @param job_ids: Job IDs
2109
    @type fields: list of strings
2110
    @param fields: Fields
2111

2112
    """
2113
    raise NotImplementedError()
2114

    
2115

    
2116
class JobPollReportCbBase:
2117
  """Base class for L{GenericPollJob} reporting callbacks.
2118

2119
  """
2120
  def __init__(self):
2121
    """Initializes this class.
2122

2123
    """
2124

    
2125
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2126
    """Handles a log message.
2127

2128
    """
2129
    raise NotImplementedError()
2130

    
2131
  def ReportNotChanged(self, job_id, status):
2132
    """Called for if a job hasn't changed in a while.
2133

2134
    @type job_id: number
2135
    @param job_id: Job ID
2136
    @type status: string or None
2137
    @param status: Job status if available
2138

2139
    """
2140
    raise NotImplementedError()
2141

    
2142

    
2143
class _LuxiJobPollCb(JobPollCbBase):
2144
  def __init__(self, cl):
2145
    """Initializes this class.
2146

2147
    """
2148
    JobPollCbBase.__init__(self)
2149
    self.cl = cl
2150

    
2151
  def WaitForJobChangeOnce(self, job_id, fields,
2152
                           prev_job_info, prev_log_serial):
2153
    """Waits for changes on a job.
2154

2155
    """
2156
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2157
                                        prev_job_info, prev_log_serial)
2158

    
2159
  def QueryJobs(self, job_ids, fields):
2160
    """Returns the selected fields for the selected job IDs.
2161

2162
    """
2163
    return self.cl.QueryJobs(job_ids, fields)
2164

    
2165

    
2166
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2167
  def __init__(self, feedback_fn):
2168
    """Initializes this class.
2169

2170
    """
2171
    JobPollReportCbBase.__init__(self)
2172

    
2173
    self.feedback_fn = feedback_fn
2174

    
2175
    assert callable(feedback_fn)
2176

    
2177
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2178
    """Handles a log message.
2179

2180
    """
2181
    self.feedback_fn((timestamp, log_type, log_msg))
2182

    
2183
  def ReportNotChanged(self, job_id, status):
2184
    """Called if a job hasn't changed in a while.
2185

2186
    """
2187
    # Ignore
2188

    
2189

    
2190
class StdioJobPollReportCb(JobPollReportCbBase):
2191
  def __init__(self):
2192
    """Initializes this class.
2193

2194
    """
2195
    JobPollReportCbBase.__init__(self)
2196

    
2197
    self.notified_queued = False
2198
    self.notified_waitlock = False
2199

    
2200
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2201
    """Handles a log message.
2202

2203
    """
2204
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2205
             FormatLogMessage(log_type, log_msg))
2206

    
2207
  def ReportNotChanged(self, job_id, status):
2208
    """Called if a job hasn't changed in a while.
2209

2210
    """
2211
    if status is None:
2212
      return
2213

    
2214
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2215
      ToStderr("Job %s is waiting in queue", job_id)
2216
      self.notified_queued = True
2217

    
2218
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2219
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2220
      self.notified_waitlock = True
2221

    
2222

    
2223
def FormatLogMessage(log_type, log_msg):
2224
  """Formats a job message according to its type.
2225

2226
  """
2227
  if log_type != constants.ELOG_MESSAGE:
2228
    log_msg = str(log_msg)
2229

    
2230
  return utils.SafeEncode(log_msg)
2231

    
2232

    
2233
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2234
  """Function to poll for the result of a job.
2235

2236
  @type job_id: job identifier
2237
  @param job_id: the job to poll for results
2238
  @type cl: luxi.Client
2239
  @param cl: the luxi client to use for communicating with the master;
2240
             if None, a new client will be created
2241

2242
  """
2243
  if cl is None:
2244
    cl = GetClient()
2245

    
2246
  if reporter is None:
2247
    if feedback_fn:
2248
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2249
    else:
2250
      reporter = StdioJobPollReportCb()
2251
  elif feedback_fn:
2252
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2253

    
2254
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
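
# Illustrative sketch of submitting an opcode and waiting for it with the two
# helpers above ("op" stands for any opcode built by the caller):
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)
#   # Log messages are printed to stdout via StdioJobPollReportCb by default.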
2255

    
2256

    
2257
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2258
  """Legacy function to submit an opcode.
2259

2260
  This is just a simple wrapper over the construction of the processor
2261
  instance. It should be extended to better handle feedback and
2262
  interaction functions.
2263

2264
  """
2265
  if cl is None:
2266
    cl = GetClient()
2267

    
2268
  SetGenericOpcodeOpts([op], opts)
2269

    
2270
  job_id = SendJob([op], cl=cl)
2271
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2272
    ToStdout("%d" % job_id)
2273

    
2274
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2275
                       reporter=reporter)
2276

    
2277
  return op_results[0]
2278

    
2279

    
2280
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2281
  """Wrapper around SubmitOpCode or SendJob.
2282

2283
  This function will decide, based on the 'opts' parameter, whether to
2284
  submit and wait for the result of the opcode (and return it), or
2285
  whether to just send the job and print its identifier. It is used in
2286
  order to simplify the implementation of the '--submit' option.
2287

2288
  It will also process the opcodes if we're sending them via SendJob
2289
  (otherwise SubmitOpCode does it).
2290

2291
  """
2292
  if opts and opts.submit_only:
2293
    job = [op]
2294
    SetGenericOpcodeOpts(job, opts)
2295
    job_id = SendJob(job, cl=cl)
2296
    if opts.print_jobid:
2297
      ToStdout("%d" % job_id)
2298
    raise JobSubmittedException(job_id)
2299
  else:
2300
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
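
# Illustrative sketch (the opcode is only an example): a client command builds
# an opcode and hands it to SubmitOrSend, letting the user's --submit flag
# decide whether to wait for the result:
#
#   op = opcodes.OpInstanceStartup(instance_name=args[0])
#   SubmitOrSend(op, opts)
#   # With --submit the job id is printed and JobSubmittedException is raised;
#   # otherwise the call blocks and returns the opcode result.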
2301

    
2302

    
2303
def _InitReasonTrail(op, opts):
2304
  """Builds the first part of the reason trail
2305

2306
  Builds the initial part of the reason trail, adding the user provided reason
2307
  (if it exists) and the name of the command starting the operation.
2308

2309
  @param op: the opcode the reason trail will be added to
2310
  @param opts: the command line options selected by the user
2311

2312
  """
2313
  assert len(sys.argv) >= 2
2314
  trail = []
2315

    
2316
  if opts.reason:
2317
    trail.append((constants.OPCODE_REASON_SRC_USER,
2318
                  opts.reason,
2319
                  utils.EpochNano()))
2320

    
2321
  binary = os.path.basename(sys.argv[0])
2322
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2323
  command = sys.argv[1]
2324
  trail.append((source, command, utils.EpochNano()))
2325
  op.reason = trail
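
# Illustrative example (values are hypothetical): running a command as
# "gnt-instance reboot --reason 'kernel upgrade'" would leave a trail roughly
# like
#
#   [(constants.OPCODE_REASON_SRC_USER, "kernel upgrade", 1366118094300076839),
#    (constants.OPCODE_REASON_SRC_CLIENT + ":gnt-instance", "reboot",
#     1366118094300076839)]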
2326

    
2327

    
2328
def SetGenericOpcodeOpts(opcode_list, options):
2329
  """Processor for generic options.
2330

2331
  This function updates the given opcodes based on generic command
2332
  line options (like debug, dry-run, etc.).
2333

2334
  @param opcode_list: list of opcodes
2335
  @param options: command line options or None
2336
  @return: None (in-place modification)
2337

2338
  """
2339
  if not options:
2340
    return
2341
  for op in opcode_list:
2342
    op.debug_level = options.debug
2343
    if hasattr(options, "dry_run"):
2344
      op.dry_run = options.dry_run
2345
    if getattr(options, "priority", None) is not None:
2346
      op.priority = options.priority
2347
    _InitReasonTrail(op, options)
2348

    
2349

    
2350
def GetClient(query=False):
2351
  """Connects to the a luxi socket and returns a client.
2352

2353
  @type query: boolean
2354
  @param query: this signifies that the client will only be
2355
      used for queries; if the build-time parameter
2356
      enable-split-queries is enabled, then the client will be
2357
      connected to the query socket instead of the masterd socket
2358

2359
  """
2360
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2361
  if override_socket:
2362
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2363
      address = pathutils.MASTER_SOCKET
2364
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2365
      address = pathutils.QUERY_SOCKET
2366
    else:
2367
      address = override_socket
2368
  elif query and constants.ENABLE_SPLIT_QUERY:
2369
    address = pathutils.QUERY_SOCKET
2370
  else:
2371
    address = None
2372
  # TODO: Cache object?
2373
  try:
2374
    client = luxi.Client(address=address)
2375
  except luxi.NoMasterError:
2376
    ss = ssconf.SimpleStore()
2377

    
2378
    # Try to read ssconf file
2379
    try:
2380
      ss.GetMasterNode()
2381
    except errors.ConfigurationError:
2382
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2383
                                 " not part of a cluster",
2384
                                 errors.ECODE_INVAL)
2385

    
2386
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2387
    if master != myself:
2388
      raise errors.OpPrereqError("This is not the master node, please connect"
2389
                                 " to node '%s' and rerun the command" %
2390
                                 master, errors.ECODE_INVAL)
2391
    raise
2392
  return client
2393

    
2394

    
2395
def FormatError(err):
2396
  """Return a formatted error message for a given error.
2397

2398
  This function takes an exception instance and returns a tuple
2399
  consisting of two values: first, the recommended exit code, and
2400
  second, a string describing the error message (not
2401
  newline-terminated).
2402

2403
  """
2404
  retcode = 1
2405
  obuf = StringIO()
2406
  msg = str(err)
2407
  if isinstance(err, errors.ConfigurationError):
2408
    txt = "Corrupt configuration file: %s" % msg
2409
    logging.error(txt)
2410
    obuf.write(txt + "\n")
2411
    obuf.write("Aborting.")
2412
    retcode = 2
2413
  elif isinstance(err, errors.HooksAbort):
2414
    obuf.write("Failure: hooks execution failed:\n")
2415
    for node, script, out in err.args[0]:
2416
      if out:
2417
        obuf.write("  node: %s, script: %s, output: %s\n" %
2418
                   (node, script, out))
2419
      else:
2420
        obuf.write("  node: %s, script: %s (no output)\n" %
2421
                   (node, script))
2422
  elif isinstance(err, errors.HooksFailure):
2423
    obuf.write("Failure: hooks general failure: %s" % msg)
2424
  elif isinstance(err, errors.ResolverError):
2425
    this_host = netutils.Hostname.GetSysName()
2426
    if err.args[0] == this_host:
2427
      msg = "Failure: can't resolve my own hostname ('%s')"
2428
    else:
2429
      msg = "Failure: can't resolve hostname '%s'"
2430
    obuf.write(msg % err.args[0])
2431
  elif isinstance(err, errors.OpPrereqError):
2432
    if len(err.args) == 2:
2433
      obuf.write("Failure: prerequisites not met for this"
2434
                 " operation:\nerror type: %s, error details:\n%s" %
2435
                 (err.args[1], err.args[0]))
2436
    else:
2437
      obuf.write("Failure: prerequisites not met for this"
2438
                 " operation:\n%s" % msg)
2439
  elif isinstance(err, errors.OpExecError):
2440
    obuf.write("Failure: command execution error:\n%s" % msg)
2441
  elif isinstance(err, errors.TagError):
2442
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2443
  elif isinstance(err, errors.JobQueueDrainError):
2444
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2445
               " accept new requests\n")
2446
  elif isinstance(err, errors.JobQueueFull):
2447
    obuf.write("Failure: the job queue is full and doesn't accept new"
2448
               " job submissions until old jobs are archived\n")
2449
  elif isinstance(err, errors.TypeEnforcementError):
2450
    obuf.write("Parameter Error: %s" % msg)
2451
  elif isinstance(err, errors.ParameterError):
2452
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2453
  elif isinstance(err, luxi.NoMasterError):
2454
    if err.args[0] == pathutils.MASTER_SOCKET:
2455
      daemon = "the master daemon"
2456
    elif err.args[0] == pathutils.QUERY_SOCKET:
2457
      daemon = "the config daemon"
2458
    else:
2459
      daemon = "socket '%s'" % str(err.args[0])
2460
    obuf.write("Cannot communicate with %s.\nIs the process running"
2461
               " and listening for connections?" % daemon)
2462
  elif isinstance(err, luxi.TimeoutError):
2463
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2464
               " been submitted and will continue to run even if the call"
2465
               " timed out. Useful commands in this situation are \"gnt-job"
2466
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2467
    obuf.write(msg)
2468
  elif isinstance(err, luxi.PermissionError):
2469
    obuf.write("It seems you don't have permissions to connect to the"
2470
               " master daemon.\nPlease retry as a different user.")
2471
  elif isinstance(err, luxi.ProtocolError):
2472
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2473
               "%s" % msg)
2474
  elif isinstance(err, errors.JobLost):
2475
    obuf.write("Error checking job status: %s" % msg)
2476
  elif isinstance(err, errors.QueryFilterParseError):
2477
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2478
    obuf.write("\n".join(err.GetDetails()))
2479
  elif isinstance(err, errors.GenericError):
2480
    obuf.write("Unhandled Ganeti error: %s" % msg)
2481
  elif isinstance(err, JobSubmittedException):
2482
    obuf.write("JobID: %s\n" % err.args[0])
2483
    retcode = 0
2484
  else:
2485
    obuf.write("Unhandled exception: %s" % msg)
2486
  return retcode, obuf.getvalue().rstrip("\n")
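
# Example usage (illustrative sketch):
#
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     (retcode, err_msg) = FormatError(err)
#     ToStderr(err_msg)
#     sys.exit(retcode)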
2487

    
2488

    
2489
def GenericMain(commands, override=None, aliases=None,
2490
                env_override=frozenset()):
2491
  """Generic main function for all the gnt-* commands.
2492

2493
  @param commands: a dictionary with a special structure, see the design doc
2494
                   for command line handling.
2495
  @param override: if not None, we expect a dictionary with keys that will
2496
                   override command line options; this can be used to pass
2497
                   options from the scripts to generic functions
2498
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2499
  @param env_override: list of environment names which are allowed to submit
2500
                       default args for commands
2501

2502
  """
2503
  # save the program name and the entire command line for later logging
2504
  if sys.argv:
2505
    binary = os.path.basename(sys.argv[0])
2506
    if not binary:
2507
      binary = sys.argv[0]
2508

    
2509
    if len(sys.argv) >= 2:
2510
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2511
    else:
2512
      logname = binary
2513

    
2514
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2515
  else:
2516
    binary = "<unknown program>"
2517
    cmdline = "<unknown>"
2518

    
2519
  if aliases is None:
2520
    aliases = {}
2521

    
2522
  try:
2523
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2524
                                       env_override)
2525
  except _ShowVersion:
2526
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2527
             constants.RELEASE_VERSION)
2528
    return constants.EXIT_SUCCESS
2529
  except _ShowUsage, err:
2530
    for line in _FormatUsage(binary, commands):
2531
      ToStdout(line)
2532

    
2533
    if err.exit_error:
2534
      return constants.EXIT_FAILURE
2535
    else:
2536
      return constants.EXIT_SUCCESS
2537
  except errors.ParameterError, err:
2538
    result, err_msg = FormatError(err)
2539
    ToStderr(err_msg)
2540
    return 1
2541

    
2542
  if func is None: # parse error
2543
    return 1
2544

    
2545
  if override is not None:
2546
    for key, val in override.iteritems():
2547
      setattr(options, key, val)
2548

    
2549
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2550
                     stderr_logging=True)
2551

    
2552
  logging.info("Command line: %s", cmdline)
2553

    
2554
  try:
2555
    result = func(options, args)
2556
  except (errors.GenericError, luxi.ProtocolError,
2557
          JobSubmittedException), err:
2558
    result, err_msg = FormatError(err)
2559
    logging.exception("Error during command processing")
2560
    ToStderr(err_msg)
2561
  except KeyboardInterrupt:
2562
    result = constants.EXIT_FAILURE
2563
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2564
             " might have been submitted and"
2565
             " will continue to run in the background.")
2566
  except IOError, err:
2567
    if err.errno == errno.EPIPE:
2568
      # our terminal went away, we'll exit
2569
      sys.exit(constants.EXIT_FAILURE)
2570
    else:
2571
      raise
2572

    
2573
  return result
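
# Illustrative sketch of how a gnt-* script uses GenericMain; the command
# function "ListCmd" is hypothetical, while ARGS_NONE and FIELDS_OPT are
# definitions from this module:
#
#   commands = {
#     "list": (ListCmd, ARGS_NONE, [FIELDS_OPT], "", "List the objects"),
#     }
#
#   if __name__ == "__main__":
#     sys.exit(GenericMain(commands))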
2574

    
2575

    
2576
def ParseNicOption(optvalue):
2577
  """Parses the value of the --net option(s).
2578

2579
  """
2580
  try:
2581
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2582
  except (TypeError, ValueError), err:
2583
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2584
                               errors.ECODE_INVAL)
2585

    
2586
  nics = [{}] * nic_max
2587
  for nidx, ndict in optvalue:
2588
    nidx = int(nidx)
2589

    
2590
    if not isinstance(ndict, dict):
2591
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2592
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2593

    
2594
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2595

    
2596
    nics[nidx] = ndict
2597

    
2598
  return nics
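
# Illustrative example (MAC address and mode are hypothetical): two "--net"
# options such as "0:mac=aa:00:00:35:4e:01" and "1:mode=bridged" arrive here as
#
#   [("0", {"mac": "aa:00:00:35:4e:01"}), ("1", {"mode": "bridged"})]
#
# and are converted into
#
#   [{"mac": "aa:00:00:35:4e:01"}, {"mode": "bridged"}]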
2599

    
2600

    
2601
def GenericInstanceCreate(mode, opts, args):
2602
  """Add an instance to the cluster via either creation or import.
2603

2604
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2605
  @param opts: the command line options selected by the user
2606
  @type args: list
2607
  @param args: should contain only one element, the new instance name
2608
  @rtype: int
2609
  @return: the desired exit code
2610

2611
  """
2612
  instance = args[0]
2613

    
2614
  (pnode, snode) = SplitNodeOption(opts.node)
2615

    
2616
  hypervisor = None
2617
  hvparams = {}
2618
  if opts.hypervisor:
2619
    hypervisor, hvparams = opts.hypervisor
2620

    
2621
  if opts.nics:
2622
    nics = ParseNicOption(opts.nics)
2623
  elif opts.no_nics:
2624
    # no nics
2625
    nics = []
2626
  elif mode == constants.INSTANCE_CREATE:
2627
    # default of one nic, all auto
2628
    nics = [{}]
2629
  else:
2630
    # mode == import
2631
    nics = []
2632

    
2633
  if opts.disk_template == constants.DT_DISKLESS:
2634
    if opts.disks or opts.sd_size is not None:
2635
      raise errors.OpPrereqError("Diskless instance but disk"
2636
                                 " information passed", errors.ECODE_INVAL)
2637
    disks = []
2638
  else:
2639
    if (not opts.disks and not opts.sd_size
2640
        and mode == constants.INSTANCE_CREATE):
2641
      raise errors.OpPrereqError("No disk information specified",
2642
                                 errors.ECODE_INVAL)
2643
    if opts.disks and opts.sd_size is not None:
2644
      raise errors.OpPrereqError("Please use either the '--disk' or"
2645
                                 " '-s' option", errors.ECODE_INVAL)
2646
    if opts.sd_size is not None:
2647
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2648

    
2649
    if opts.disks:
2650
      try:
2651
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2652
      except ValueError, err:
2653
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2654
                                   errors.ECODE_INVAL)
2655
      disks = [{}] * disk_max
2656
    else:
2657
      disks = []
2658
    for didx, ddict in opts.disks:
2659
      didx = int(didx)
2660
      if not isinstance(ddict, dict):
2661
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2662
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2663
      elif constants.IDISK_SIZE in ddict:
2664
        if constants.IDISK_ADOPT in ddict:
2665
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2666
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2667
        try:
2668
          ddict[constants.IDISK_SIZE] = \
2669
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2670
        except ValueError, err:
2671
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2672
                                     (didx, err), errors.ECODE_INVAL)
2673
      elif constants.IDISK_ADOPT in ddict:
2674
        if constants.IDISK_SPINDLES in ddict:
2675
          raise errors.OpPrereqError("spindles is not a valid option when"
2676
                                     " adopting a disk", errors.ECODE_INVAL)
2677
        if mode == constants.INSTANCE_IMPORT:
2678
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2679
                                     " import", errors.ECODE_INVAL)
2680
        ddict[constants.IDISK_SIZE] = 0
2681
      else:
2682
        raise errors.OpPrereqError("Missing size or adoption source for"
2683
                                   " disk %d" % didx, errors.ECODE_INVAL)
2684
      disks[didx] = ddict
2685

    
2686
  if opts.tags is not None:
2687
    tags = opts.tags.split(",")
2688
  else:
2689
    tags = []
2690

    
2691
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2692
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2693

    
2694
  if mode == constants.INSTANCE_CREATE:
2695
    start = opts.start
2696
    os_type = opts.os
2697
    force_variant = opts.force_variant
2698
    src_node = None
2699
    src_path = None
2700
    no_install = opts.no_install
2701
    identify_defaults = False
2702
  elif mode == constants.INSTANCE_IMPORT:
2703
    start = False
2704
    os_type = None
2705
    force_variant = False
2706
    src_node = opts.src_node
2707
    src_path = opts.src_dir
2708
    no_install = None
2709
    identify_defaults = opts.identify_defaults
2710
  else:
2711
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2712

    
2713
  op = opcodes.OpInstanceCreate(instance_name=instance,
2714
                                disks=disks,
2715
                                disk_template=opts.disk_template,
2716
                                nics=nics,
2717
                                conflicts_check=opts.conflicts_check,
2718
                                pnode=pnode, snode=snode,
2719
                                ip_check=opts.ip_check,
2720
                                name_check=opts.name_check,
2721
                                wait_for_sync=opts.wait_for_sync,
2722
                                file_storage_dir=opts.file_storage_dir,
2723
                                file_driver=opts.file_driver,
2724
                                iallocator=opts.iallocator,
2725
                                hypervisor=hypervisor,
2726
                                hvparams=hvparams,
2727
                                beparams=opts.beparams,
2728
                                osparams=opts.osparams,
2729
                                mode=mode,
2730
                                start=start,
2731
                                os_type=os_type,
2732
                                force_variant=force_variant,
2733
                                src_node=src_node,
2734
                                src_path=src_path,
2735
                                tags=tags,
2736
                                no_install=no_install,
2737
                                identify_defaults=identify_defaults,
2738
                                ignore_ipolicy=opts.ignore_ipolicy)
2739

    
2740
  SubmitOrSend(op, opts)
2741
  return 0
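
# Illustrative example (sizes are hypothetical): "gnt-instance add" options
# such as "--disk 0:size=10G --disk 1:size=2G" reach this function as
#
#   opts.disks = [("0", {"size": "10G"}), ("1", {"size": "2G"})]
#
# and end up in the opcode as disks = [{"size": 10240}, {"size": 2048}],
# i.e. with the sizes normalized to mebibytes by utils.ParseUnit.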
2742

    
2743

    
2744
class _RunWhileClusterStoppedHelper:
2745
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2746

2747
  """
2748
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2749
    """Initializes this class.
2750

2751
    @type feedback_fn: callable
2752
    @param feedback_fn: Feedback function
2753
    @type cluster_name: string
2754
    @param cluster_name: Cluster name
2755
    @type master_node: string
2756
    @param master_node: Master node name
2757
    @type online_nodes: list
2758
    @param online_nodes: List of names of online nodes
2759

2760
    """
2761
    self.feedback_fn = feedback_fn
2762
    self.cluster_name = cluster_name
2763
    self.master_node = master_node
2764
    self.online_nodes = online_nodes
2765

    
2766
    self.ssh = ssh.SshRunner(self.cluster_name)
2767

    
2768
    self.nonmaster_nodes = [name for name in online_nodes
2769
                            if name != master_node]
2770

    
2771
    assert self.master_node not in self.nonmaster_nodes
2772

    
2773
  def _RunCmd(self, node_name, cmd):
2774
    """Runs a command on the local or a remote machine.
2775

2776
    @type node_name: string
2777
    @param node_name: Machine name
2778
    @type cmd: list
2779
    @param cmd: Command
2780

2781
    """
2782
    if node_name is None or node_name == self.master_node:
2783
      # No need to use SSH
2784
      result = utils.RunCmd(cmd)
2785
    else:
2786
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2787
                            utils.ShellQuoteArgs(cmd))
2788

    
2789
    if result.failed:
2790
      errmsg = ["Failed to run command %s" % result.cmd]
2791
      if node_name:
2792
        errmsg.append("on node %s" % node_name)
2793
      errmsg.append(": exitcode %s and error %s" %
2794
                    (result.exit_code, result.output))
2795
      raise errors.OpExecError(" ".join(errmsg))
2796

    
2797
  def Call(self, fn, *args):
2798
    """Call function while all daemons are stopped.
2799

2800
    @type fn: callable
2801
    @param fn: Function to be called
2802

2803
    """
2804
    # Pause watcher by acquiring an exclusive lock on watcher state file
2805
    self.feedback_fn("Blocking watcher")
2806
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2807
    try:
2808
      # TODO: Currently, this just blocks. There's no timeout.
2809
      # TODO: Should it be a shared lock?
2810
      watcher_block.Exclusive(blocking=True)
2811

    
2812
      # Stop master daemons, so that no new jobs can come in and all running
2813
      # ones are finished
2814
      self.feedback_fn("Stopping master daemons")
2815
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2816
      try:
2817
        # Stop daemons on all nodes
2818
        for node_name in self.online_nodes:
2819
          self.feedback_fn("Stopping daemons on %s" % node_name)
2820
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2821

    
2822
        # All daemons are shut down now
2823
        try:
2824
          return fn(self, *args)
2825
        except Exception, err:
2826
          _, errmsg = FormatError(err)
2827
          logging.exception("Caught exception")
2828
          self.feedback_fn(errmsg)
2829
          raise
2830
      finally:
2831
        # Start cluster again, master node last
2832
        for node_name in self.nonmaster_nodes + [self.master_node]:
2833
          self.feedback_fn("Starting daemons on %s" % node_name)
2834
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2835
    finally:
2836
      # Resume watcher
2837
      watcher_block.Close()
2838

    
2839

    
2840
def RunWhileClusterStopped(feedback_fn, fn, *args):
2841
  """Calls a function while all cluster daemons are stopped.
2842

2843
  @type feedback_fn: callable
2844
  @param feedback_fn: Feedback function
2845
  @type fn: callable
2846
  @param fn: Function to be called when daemons are stopped
2847

2848
  """
2849
  feedback_fn("Gathering cluster information")
2850

    
2851
  # This ensures we're running on the master daemon
2852
  cl = GetClient()
2853

    
2854
  (cluster_name, master_node) = \
2855
    cl.QueryConfigValues(["cluster_name", "master_node"])
2856

    
2857
  online_nodes = GetOnlineNodes([], cl=cl)
2858

    
2859
  # Don't keep a reference to the client. The master daemon will go away.
2860
  del cl
2861

    
2862
  assert master_node in online_nodes
2863

    
2864
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2865
                                       online_nodes).Call(fn, *args)
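
# Editor's illustrative sketch (not part of the original module): one way a
# caller might use RunWhileClusterStopped. The callback receives the
# _RunWhileClusterStoppedHelper instance as its first argument, so it can act
# on the (stopped) nodes. The example function below is hypothetical and is
# never called.
def _ExampleRunWhileClusterStopped():
  """Example only: run a simple callback while all daemons are stopped."""
  def _Callback(helper):
    # "helper" is the _RunWhileClusterStoppedHelper instance created above
    helper.feedback_fn("All daemons stopped on %s" %
                       utils.CommaJoin(helper.online_nodes))
    return True

  return RunWhileClusterStopped(ToStdout, _Callback)
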
2866

    
2867

    
2868
def GenerateTable(headers, fields, separator, data,
2869
                  numfields=None, unitfields=None,
2870
                  units=None):
2871
  """Prints a table with headers and different fields.
2872

2873
  @type headers: dict
2874
  @param headers: dictionary mapping field names to headers for
2875
      the table
2876
  @type fields: list
2877
  @param fields: the field names corresponding to each row in
2878
      the data field
2879
  @param separator: the separator to be used; if this is None,
2880
      the default 'smart' algorithm is used which computes optimal
2881
      field width, otherwise just the separator is used between
2882
      each field
2883
  @type data: list
2884
  @param data: a list of lists, each sublist being one row to be output
2885
  @type numfields: list
2886
  @param numfields: a list with the fields that hold numeric
2887
      values and thus should be right-aligned
2888
  @type unitfields: list
2889
  @param unitfields: a list with the fields that hold numeric
2890
      values that should be formatted with the units field
2891
  @type units: string or None
2892
  @param units: the units we should use for formatting, or None for
2893
      automatic choice (human-readable for non-separator usage, otherwise
2894
      megabytes); this is a one-letter string
2895

2896
  """
2897
  if units is None:
2898
    if separator:
2899
      units = "m"
2900
    else:
2901
      units = "h"
2902

    
2903
  if numfields is None:
2904
    numfields = []
2905
  if unitfields is None:
2906
    unitfields = []
2907

    
2908
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2909
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2910

    
2911
  format_fields = []
2912
  for field in fields:
2913
    if headers and field not in headers:
2914
      # TODO: handle better unknown fields (either revert to old
2915
      # style of raising exception, or deal more intelligently with
2916
      # variable fields)
2917
      headers[field] = field
2918
    if separator is not None:
2919
      format_fields.append("%s")
2920
    elif numfields.Matches(field):
2921
      format_fields.append("%*s")
2922
    else:
2923
      format_fields.append("%-*s")
2924

    
2925
  if separator is None:
2926
    mlens = [0 for name in fields]
2927
    format_str = " ".join(format_fields)
2928
  else:
2929
    format_str = separator.replace("%", "%%").join(format_fields)
2930

    
2931
  for row in data:
2932
    if row is None:
2933
      continue
2934
    for idx, val in enumerate(row):
2935
      if unitfields.Matches(fields[idx]):
2936
        try:
2937
          val = int(val)
2938
        except (TypeError, ValueError):
2939
          pass
2940
        else:
2941
          val = row[idx] = utils.FormatUnit(val, units)
2942
      val = row[idx] = str(val)
2943
      if separator is None:
2944
        mlens[idx] = max(mlens[idx], len(val))
2945

    
2946
  result = []
2947
  if headers:
2948
    args = []
2949
    for idx, name in enumerate(fields):
2950
      hdr = headers[name]
2951
      if separator is None:
2952
        mlens[idx] = max(mlens[idx], len(hdr))
2953
        args.append(mlens[idx])
2954
      args.append(hdr)
2955
    result.append(format_str % tuple(args))
2956

    
2957
  if separator is None:
2958
    assert len(mlens) == len(fields)
2959

    
2960
    if fields and not numfields.Matches(fields[-1]):
2961
      mlens[-1] = 0
2962

    
2963
  for line in data:
2964
    args = []
2965
    if line is None:
2966
      line = ["-" for _ in fields]
2967
    for idx in range(len(fields)):
2968
      if separator is None:
2969
        args.append(mlens[idx])
2970
      args.append(line[idx])
2971
    result.append(format_str % tuple(args))
2972

    
2973
  return result
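
# Editor's illustrative sketch (not part of the original module): typical use
# of GenerateTable. The field and header names are made up for the example;
# the helper is never called.
def _ExampleGenerateTable():
  """Example only: render two columns with the 'smart' layout."""
  headers = {"name": "Node", "mfree": "MemFree"}
  fields = ["name", "mfree"]
  data = [["node1.example.com", 2048],
          ["node2.example.com", 512]]
  # separator=None selects automatic column widths; "mfree" is both
  # right-aligned (numfields) and rendered with units (unitfields)
  for line in GenerateTable(headers, fields, None, data,
                            numfields=["mfree"], unitfields=["mfree"]):
    ToStdout(line)
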
2974

    
2975

    
2976
def _FormatBool(value):
2977
  """Formats a boolean value as a string.
2978

2979
  """
2980
  if value:
2981
    return "Y"
2982
  return "N"
2983

    
2984

    
2985
#: Default formatting for query results; (callback, align right)
2986
_DEFAULT_FORMAT_QUERY = {
2987
  constants.QFT_TEXT: (str, False),
2988
  constants.QFT_BOOL: (_FormatBool, False),
2989
  constants.QFT_NUMBER: (str, True),
2990
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2991
  constants.QFT_OTHER: (str, False),
2992
  constants.QFT_UNKNOWN: (str, False),
2993
  }
2994

    
2995

    
2996
def _GetColumnFormatter(fdef, override, unit):
2997
  """Returns formatting function for a field.
2998

2999
  @type fdef: L{objects.QueryFieldDefinition}
3000
  @type override: dict
3001
  @param override: Dictionary for overriding field formatting functions,
3002
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3003
  @type unit: string
3004
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3005
  @rtype: tuple; (callable, bool)
3006
  @return: Returns the function to format a value (takes one parameter) and a
3007
    boolean for aligning the value on the right-hand side
3008

3009
  """
3010
  fmt = override.get(fdef.name, None)
3011
  if fmt is not None:
3012
    return fmt
3013

    
3014
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3015

    
3016
  if fdef.kind == constants.QFT_UNIT:
3017
    # Can't keep this information in the static dictionary
3018
    return (lambda value: utils.FormatUnit(value, unit), True)
3019

    
3020
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3021
  if fmt is not None:
3022
    return fmt
3023

    
3024
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3025

    
3026

    
3027
class _QueryColumnFormatter:
3028
  """Callable class for formatting fields of a query.
3029

3030
  """
3031
  def __init__(self, fn, status_fn, verbose):
3032
    """Initializes this class.
3033

3034
    @type fn: callable
3035
    @param fn: Formatting function
3036
    @type status_fn: callable
3037
    @param status_fn: Function to report fields' status
3038
    @type verbose: boolean
3039
    @param verbose: whether to use verbose field descriptions or not
3040

3041
    """
3042
    self._fn = fn
3043
    self._status_fn = status_fn
3044
    self._verbose = verbose
3045

    
3046
  def __call__(self, data):
3047
    """Returns a field's string representation.
3048

3049
    """
3050
    (status, value) = data
3051

    
3052
    # Report status
3053
    self._status_fn(status)
3054

    
3055
    if status == constants.RS_NORMAL:
3056
      return self._fn(value)
3057

    
3058
    assert value is None, \
3059
           "Found value %r for abnormal status %s" % (value, status)
3060

    
3061
    return FormatResultError(status, self._verbose)
3062

    
3063

    
3064
def FormatResultError(status, verbose):
3065
  """Formats result status other than L{constants.RS_NORMAL}.
3066

3067
  @param status: The result status
3068
  @type verbose: boolean
3069
  @param verbose: Whether to return the verbose text
3070
  @return: Text of result status
3071

3072
  """
3073
  assert status != constants.RS_NORMAL, \
3074
         "FormatResultError called with status equal to constants.RS_NORMAL"
3075
  try:
3076
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3077
  except KeyError:
3078
    raise NotImplementedError("Unknown status %s" % status)
3079
  else:
3080
    if verbose:
3081
      return verbose_text
3082
    return normal_text
3083

    
3084

    
3085
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3086
                      header=False, verbose=False):
3087
  """Formats data in L{objects.QueryResponse}.
3088

3089
  @type result: L{objects.QueryResponse}
3090
  @param result: result of query operation
3091
  @type unit: string
3092
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3093
    see L{utils.text.FormatUnit}
3094
  @type format_override: dict
3095
  @param format_override: Dictionary for overriding field formatting functions,
3096
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3097
  @type separator: string or None
3098
  @param separator: String used to separate fields
3099
  @type header: bool
3100
  @param header: Whether to output header row
3101
  @type verbose: boolean
3102
  @param verbose: whether to use verbose field descriptions or not
3103

3104
  """
3105
  if unit is None:
3106
    if separator:
3107
      unit = "m"
3108
    else:
3109
      unit = "h"
3110

    
3111
  if format_override is None:
3112
    format_override = {}
3113

    
3114
  stats = dict.fromkeys(constants.RS_ALL, 0)
3115

    
3116
  def _RecordStatus(status):
3117
    if status in stats:
3118
      stats[status] += 1
3119

    
3120
  columns = []
3121
  for fdef in result.fields:
3122
    assert fdef.title and fdef.name
3123
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3124
    columns.append(TableColumn(fdef.title,
3125
                               _QueryColumnFormatter(fn, _RecordStatus,
3126
                                                     verbose),
3127
                               align_right))
3128

    
3129
  table = FormatTable(result.data, columns, header, separator)
3130

    
3131
  # Collect statistics
3132
  assert len(stats) == len(constants.RS_ALL)
3133
  assert compat.all(count >= 0 for count in stats.values())
3134

    
3135
  # Determine overall status. If there was no data, unknown fields must be
3136
  # detected via the field definitions.
3137
  if (stats[constants.RS_UNKNOWN] or
3138
      (not result.data and _GetUnknownFields(result.fields))):
3139
    status = QR_UNKNOWN
3140
  elif compat.any(count > 0 for key, count in stats.items()
3141
                  if key != constants.RS_NORMAL):
3142
    status = QR_INCOMPLETE
3143
  else:
3144
    status = QR_NORMAL
3145

    
3146
  return (status, table)
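
# Editor's illustrative sketch (not part of the original module): formatting a
# LUXI query response directly. The chosen field names are only an example;
# the helper is never called.
def _ExampleFormatQueryResult():
  """Example only: query node names/offline flags and print the table."""
  cl = GetClient()
  response = cl.Query(constants.QR_NODE, ["name", "offline"], None)
  (status, lines) = FormatQueryResult(response, unit="h", header=True)
  for line in lines:
    ToStdout(line)
  return status == QR_NORMAL
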
3147

    
3148

    
3149
def _GetUnknownFields(fdefs):
3150
  """Returns list of unknown fields included in C{fdefs}.
3151

3152
  @type fdefs: list of L{objects.QueryFieldDefinition}
3153

3154
  """
3155
  return [fdef for fdef in fdefs
3156
          if fdef.kind == constants.QFT_UNKNOWN]
3157

    
3158

    
3159
def _WarnUnknownFields(fdefs):
3160
  """Prints a warning to stderr if a query included unknown fields.
3161

3162
  @type fdefs: list of L{objects.QueryFieldDefinition}
3163

3164
  """
3165
  unknown = _GetUnknownFields(fdefs)
3166
  if unknown:
3167
    ToStderr("Warning: Queried for unknown fields %s",
3168
             utils.CommaJoin(fdef.name for fdef in unknown))
3169
    return True
3170

    
3171
  return False
3172

    
3173

    
3174
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3175
                format_override=None, verbose=False, force_filter=False,
3176
                namefield=None, qfilter=None, isnumeric=False):
3177
  """Generic implementation for listing all items of a resource.
3178

3179
  @param resource: One of L{constants.QR_VIA_LUXI}
3180
  @type fields: list of strings
3181
  @param fields: List of fields to query for
3182
  @type names: list of strings
3183
  @param names: Names of items to query for
3184
  @type unit: string or None
3185
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3186
    None for automatic choice (human-readable for non-separator usage,
3187
    otherwise megabytes); this is a one-letter string
3188
  @type separator: string or None
3189
  @param separator: String used to separate fields
3190
  @type header: bool
3191
  @param header: Whether to show header row
3192
  @type force_filter: bool
3193
  @param force_filter: Whether to always treat names as filter
3194
  @type format_override: dict
3195
  @param format_override: Dictionary for overriding field formatting functions,
3196
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3197
  @type verbose: boolean
3198
  @param verbose: whether to use verbose field descriptions or not
3199
  @type namefield: string
3200
  @param namefield: Name of field to use for simple filters (see
3201
    L{qlang.MakeFilter} for details)
3202
  @type qfilter: list or None
3203
  @param qfilter: Query filter (in addition to names)
3204
  @type isnumeric: bool
3205
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3206
    any simple filters built by namefield should use integer values to
3207
    reflect that
3208

3209
  """
3210
  if not names:
3211
    names = None
3212

    
3213
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3214
                                isnumeric=isnumeric)
3215

    
3216
  if qfilter is None:
3217
    qfilter = namefilter
3218
  elif namefilter is not None:
3219
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3220

    
3221
  if cl is None:
3222
    cl = GetClient()
3223

    
3224
  response = cl.Query(resource, fields, qfilter)
3225

    
3226
  found_unknown = _WarnUnknownFields(response.fields)
3227

    
3228
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3229
                                     header=header,
3230
                                     format_override=format_override,
3231
                                     verbose=verbose)
3232

    
3233
  for line in data:
3234
    ToStdout(line)
3235

    
3236
  assert ((found_unknown and status == QR_UNKNOWN) or
3237
          (not found_unknown and status != QR_UNKNOWN))
3238

    
3239
  if status == QR_UNKNOWN:
3240
    return constants.EXIT_UNKNOWN_FIELD
3241

    
3242
  # TODO: Should the list command fail if not all data could be collected?
3243
  return constants.EXIT_SUCCESS
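
# Editor's illustrative sketch (not part of the original module): this is
# roughly how a "gnt-* list" style command can delegate to GenericList. The
# opts/args parameters and their attributes (units, separator, no_headers,
# verbose) mirror the usual cli command signature and are assumptions here;
# the helper is never called.
def _ExampleListNodes(opts, args):
  """Example only: list nodes, honouring the usual output options."""
  return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"], args,
                     opts.units, opts.separator, not opts.no_headers,
                     verbose=opts.verbose)
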
3244

    
3245

    
3246
def _FieldDescValues(fdef):
3247
  """Helper function for L{GenericListFields} to get query field description.
3248

3249
  @type fdef: L{objects.QueryFieldDefinition}
3250
  @rtype: list
3251

3252
  """
3253
  return [
3254
    fdef.name,
3255
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3256
    fdef.title,
3257
    fdef.doc,
3258
    ]
3259

    
3260

    
3261
def GenericListFields(resource, fields, separator, header, cl=None):
3262
  """Generic implementation for listing fields for a resource.
3263

3264
  @param resource: One of L{constants.QR_VIA_LUXI}
3265
  @type fields: list of strings
3266
  @param fields: List of fields to query for
3267
  @type separator: string or None
3268
  @param separator: String used to separate fields
3269
  @type header: bool
3270
  @param header: Whether to show header row
3271

3272
  """
3273
  if cl is None:
3274
    cl = GetClient()
3275

    
3276
  if not fields:
3277
    fields = None
3278

    
3279
  response = cl.QueryFields(resource, fields)
3280

    
3281
  found_unknown = _WarnUnknownFields(response.fields)
3282

    
3283
  columns = [
3284
    TableColumn("Name", str, False),
3285
    TableColumn("Type", str, False),
3286
    TableColumn("Title", str, False),
3287
    TableColumn("Description", str, False),
3288
    ]
3289

    
3290
  rows = map(_FieldDescValues, response.fields)
3291

    
3292
  for line in FormatTable(rows, columns, header, separator):
3293
    ToStdout(line)
3294

    
3295
  if found_unknown:
3296
    return constants.EXIT_UNKNOWN_FIELD
3297

    
3298
  return constants.EXIT_SUCCESS
3299

    
3300

    
3301
class TableColumn:
3302
  """Describes a column for L{FormatTable}.
3303

3304
  """
3305
  def __init__(self, title, fn, align_right):
3306
    """Initializes this class.
3307

3308
    @type title: string
3309
    @param title: Column title
3310
    @type fn: callable
3311
    @param fn: Formatting function
3312
    @type align_right: bool
3313
    @param align_right: Whether to align values on the right-hand side
3314

3315
    """
3316
    self.title = title
3317
    self.format = fn
3318
    self.align_right = align_right
3319

    
3320

    
3321
def _GetColFormatString(width, align_right):
3322
  """Returns the format string for a field.
3323

3324
  """
3325
  if align_right:
3326
    sign = ""
3327
  else:
3328
    sign = "-"
3329

    
3330
  return "%%%s%ss" % (sign, width)
3331

    
3332

    
3333
def FormatTable(rows, columns, header, separator):
3334
  """Formats data as a table.
3335

3336
  @type rows: list of lists
3337
  @param rows: Row data, one list per row
3338
  @type columns: list of L{TableColumn}
3339
  @param columns: Column descriptions
3340
  @type header: bool
3341
  @param header: Whether to show header row
3342
  @type separator: string or None
3343
  @param separator: String used to separate columns
3344

3345
  """
3346
  if header:
3347
    data = [[col.title for col in columns]]
3348
    colwidth = [len(col.title) for col in columns]
3349
  else:
3350
    data = []
3351
    colwidth = [0 for _ in columns]
3352

    
3353
  # Format row data
3354
  for row in rows:
3355
    assert len(row) == len(columns)
3356

    
3357
    formatted = [col.format(value) for value, col in zip(row, columns)]
3358

    
3359
    if separator is None:
3360
      # Update column widths
3361
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3362
        # Modifying a list's items while iterating is fine
3363
        colwidth[idx] = max(oldwidth, len(value))
3364

    
3365
    data.append(formatted)
3366

    
3367
  if separator is not None:
3368
    # Return early if a separator is used
3369
    return [separator.join(row) for row in data]
3370

    
3371
  if columns and not columns[-1].align_right:
3372
    # Avoid unnecessary spaces at end of line
3373
    colwidth[-1] = 0
3374

    
3375
  # Build format string
3376
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3377
                  for col, width in zip(columns, colwidth)])
3378

    
3379
  return [fmt % tuple(row) for row in data]
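
# Editor's illustrative sketch (not part of the original module): building a
# small table directly with TableColumn and FormatTable. Titles and values are
# made up; the helper is never called.
def _ExampleFormatTable():
  """Example only: two columns, the second right-aligned."""
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Jobs", str, True),
    ]
  rows = [["node1", 3], ["node2", 12]]
  # header=True adds a title row; separator=None pads columns to equal width
  return FormatTable(rows, columns, True, None)
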
3380

    
3381

    
3382
def FormatTimestamp(ts):
3383
  """Formats a given timestamp.
3384

3385
  @type ts: timestamp
3386
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3387

3388
  @rtype: string
3389
  @return: a string with the formatted timestamp
3390

3391
  """
3392
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3393
    return "?"
3394

    
3395
  (sec, usecs) = ts
3396
  return utils.FormatTime(sec, usecs=usecs)
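
# Editor's illustrative sketch (not part of the original module): the expected
# input is a (seconds, microseconds) pair; anything else yields "?". Never
# called.
def _ExampleFormatTimestamp():
  """Example only: format a timeval-style tuple."""
  assert FormatTimestamp("not a tuple") == "?"
  return FormatTimestamp((1325376000, 0))  # 2012-01-01 00:00:00 UTC
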
3397

    
3398

    
3399
def ParseTimespec(value):
3400
  """Parse a time specification.
3401

3402
  The following suffixes will be recognized:
3403

3404
    - s: seconds
3405
    - m: minutes
3406
    - h: hours
3407
    - d: days
3408
    - w: weeks
3409

3410
  Without any suffix, the value will be taken to be in seconds.
3411

3412
  """
3413
  value = str(value)
3414
  if not value:
3415
    raise errors.OpPrereqError("Empty time specification passed",
3416
                               errors.ECODE_INVAL)
3417
  suffix_map = {
3418
    "s": 1,
3419
    "m": 60,
3420
    "h": 3600,
3421
    "d": 86400,
3422
    "w": 604800,
3423
    }
3424
  if value[-1] not in suffix_map:
3425
    try:
3426
      value = int(value)
3427
    except (TypeError, ValueError):
3428
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3429
                                 errors.ECODE_INVAL)
3430
  else:
3431
    multiplier = suffix_map[value[-1]]
3432
    value = value[:-1]
3433
    if not value: # no data left after stripping the suffix
3434
      raise errors.OpPrereqError("Invalid time specification (only"
3435
                                 " suffix passed)", errors.ECODE_INVAL)
3436
    try:
3437
      value = int(value) * multiplier
3438
    except (TypeError, ValueError):
3439
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3440
                                 errors.ECODE_INVAL)
3441
  return value
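
# Editor's illustrative sketch (not part of the original module): suffix
# handling of ParseTimespec. All results are plain seconds. Never called.
def _ExampleParseTimespec():
  """Example only: suffixed and unsuffixed time specifications."""
  assert ParseTimespec("30") == 30        # no suffix: seconds
  assert ParseTimespec("5m") == 5 * 60
  assert ParseTimespec("2h") == 2 * 3600
  assert ParseTimespec("1w") == 604800
  return True
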
3442

    
3443

    
3444
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3445
                   filter_master=False, nodegroup=None):
3446
  """Returns the names of online nodes.
3447

3448
  This function will also log a warning on stderr with the names of
3449
  the offline nodes that are being skipped.
3450

3451
  @param nodes: if not empty, use only this subset of nodes (minus the
3452
      offline ones)
3453
  @param cl: if not None, luxi client to use
3454
  @type nowarn: boolean
3455
  @param nowarn: by default, this function will output a note with the
3456
      offline nodes that are skipped; if this parameter is True the
3457
      note is not displayed
3458
  @type secondary_ips: boolean
3459
  @param secondary_ips: if True, return the secondary IPs instead of the
3460
      names, useful for doing network traffic over the replication interface
3461
      (if any)
3462
  @type filter_master: boolean
3463
  @param filter_master: if True, do not return the master node in the list
3464
      (useful in coordination with secondary_ips where we cannot check our
3465
      node name against the list)
3466
  @type nodegroup: string
3467
  @param nodegroup: If set, only return nodes in this node group
3468

3469
  """
3470
  if cl is None:
3471
    cl = GetClient()
3472

    
3473
  qfilter = []
3474

    
3475
  if nodes:
3476
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3477

    
3478
  if nodegroup is not None:
3479
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3480
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3481

    
3482
  if filter_master:
3483
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3484

    
3485
  if qfilter:
3486
    if len(qfilter) > 1:
3487
      final_filter = [qlang.OP_AND] + qfilter
3488
    else:
3489
      assert len(qfilter) == 1
3490
      final_filter = qfilter[0]
3491
  else:
3492
    final_filter = None
3493

    
3494
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3495

    
3496
  def _IsOffline(row):
3497
    (_, (_, offline), _) = row
3498
    return offline
3499

    
3500
  def _GetName(row):
3501
    ((_, name), _, _) = row
3502
    return name
3503

    
3504
  def _GetSip(row):
3505
    (_, _, (_, sip)) = row
3506
    return sip
3507

    
3508
  (offline, online) = compat.partition(result.data, _IsOffline)
3509

    
3510
  if offline and not nowarn:
3511
    ToStderr("Note: skipping offline node(s): %s" %
3512
             utils.CommaJoin(map(_GetName, offline)))
3513

    
3514
  if secondary_ips:
3515
    fn = _GetSip
3516
  else:
3517
    fn = _GetName
3518

    
3519
  return map(fn, online)
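
# Editor's illustrative sketch (not part of the original module): typical
# invocations of GetOnlineNodes. Never called.
def _ExampleGetOnlineNodes():
  """Example only: all online node names, then their secondary IPs."""
  cl = GetClient()
  names = GetOnlineNodes([], cl=cl)
  # Secondary IPs of every online node except the master, e.g. for moving
  # data over the replication network from the master node
  sips = GetOnlineNodes([], cl=cl, secondary_ips=True, filter_master=True)
  return (names, sips)
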
3520

    
3521

    
3522
def _ToStream(stream, txt, *args):
3523
  """Write a message to a stream, bypassing the logging system
3524

3525
  @type stream: file object
3526
  @param stream: the file to which we should write
3527
  @type txt: str
3528
  @param txt: the message
3529

3530
  """
3531
  try:
3532
    if args:
3533
      args = tuple(args)
3534
      stream.write(txt % args)
3535
    else:
3536
      stream.write(txt)
3537
    stream.write("\n")
3538
    stream.flush()
3539
  except IOError, err:
3540
    if err.errno == errno.EPIPE:
3541
      # our terminal went away, we'll exit
3542
      sys.exit(constants.EXIT_FAILURE)
3543
    else:
3544
      raise
3545

    
3546

    
3547
def ToStdout(txt, *args):
3548
  """Write a message to stdout only, bypassing the logging system
3549

3550
  This is just a wrapper over _ToStream.
3551

3552
  @type txt: str
3553
  @param txt: the message
3554

3555
  """
3556
  _ToStream(sys.stdout, txt, *args)
3557

    
3558

    
3559
def ToStderr(txt, *args):
3560
  """Write a message to stderr only, bypassing the logging system
3561

3562
  This is just a wrapper over _ToStream.
3563

3564
  @type txt: str
3565
  @param txt: the message
3566

3567
  """
3568
  _ToStream(sys.stderr, txt, *args)
3569

    
3570

    
3571
class JobExecutor(object):
3572
  """Class which manages the submission and execution of multiple jobs.
3573

3574
  Note that instances of this class should not be reused between
3575
  GetResults() calls.
3576

3577
  """
3578
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3579
    self.queue = []
3580
    if cl is None:
3581
      cl = GetClient()
3582
    self.cl = cl
3583
    self.verbose = verbose
3584
    self.jobs = []
3585
    self.opts = opts
3586
    self.feedback_fn = feedback_fn
3587
    self._counter = itertools.count()
3588

    
3589
  @staticmethod
3590
  def _IfName(name, fmt):
3591
    """Helper function for formatting name.
3592

3593
    """
3594
    if name:
3595
      return fmt % name
3596

    
3597
    return ""
3598

    
3599
  def QueueJob(self, name, *ops):
3600
    """Record a job for later submit.
3601

3602
    @type name: string
3603
    @param name: a description of the job, will be used in WaitJobSet
3604

3605
    """
3606
    SetGenericOpcodeOpts(ops, self.opts)
3607
    self.queue.append((self._counter.next(), name, ops))
3608

    
3609
  def AddJobId(self, name, status, job_id):
3610
    """Adds a job ID to the internal queue.
3611

3612
    """
3613
    self.jobs.append((self._counter.next(), status, job_id, name))
3614

    
3615
  def SubmitPending(self, each=False):
3616
    """Submit all pending jobs.
3617

3618
    """
3619
    if each:
3620
      results = []
3621
      for (_, _, ops) in self.queue:
3622
        # SubmitJob will remove the success status, but raise an exception if
3623
        # the submission fails, so we'll notice that anyway.
3624
        results.append([True, self.cl.SubmitJob(ops)[0]])
3625
    else:
3626
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3627
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3628
      self.jobs.append((idx, status, data, name))
3629

    
3630
  def _ChooseJob(self):
3631
    """Choose a non-waiting/queued job to poll next.
3632

3633
    """
3634
    assert self.jobs, "_ChooseJob called with empty job list"
3635

    
3636
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3637
                               ["status"])
3638
    assert result
3639

    
3640
    for job_data, status in zip(self.jobs, result):
3641
      if (isinstance(status, list) and status and
3642
          status[0] in (constants.JOB_STATUS_QUEUED,
3643
                        constants.JOB_STATUS_WAITING,
3644
                        constants.JOB_STATUS_CANCELING)):
3645
        # job is still present and waiting
3646
        continue
3647
      # good candidate found (either running job or lost job)
3648
      self.jobs.remove(job_data)
3649
      return job_data
3650

    
3651
    # no job found
3652
    return self.jobs.pop(0)
3653

    
3654
  def GetResults(self):
3655
    """Wait for and return the results of all jobs.
3656

3657
    @rtype: list
3658
    @return: list of tuples (success, job results), in the same order
3659
        as the submitted jobs; if a job has failed, instead of the result
3660
        there will be the error message
3661

3662
    """
3663
    if not self.jobs:
3664
      self.SubmitPending()
3665
    results = []
3666
    if self.verbose:
3667
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3668
      if ok_jobs:
3669
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3670

    
3671
    # first, remove any non-submitted jobs
3672
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3673
    for idx, _, jid, name in failures:
3674
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3675
      results.append((idx, False, jid))
3676

    
3677
    while self.jobs:
3678
      (idx, _, jid, name) = self._ChooseJob()
3679
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3680
      try:
3681
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3682
        success = True
3683
      except errors.JobLost, err:
3684
        _, job_result = FormatError(err)
3685
        ToStderr("Job %s%s has been archived, cannot check its result",
3686
                 jid, self._IfName(name, " for %s"))
3687
        success = False
3688
      except (errors.GenericError, luxi.ProtocolError), err:
3689
        _, job_result = FormatError(err)
3690
        success = False
3691
        # the error message will always be shown, verbose or not
3692
        ToStderr("Job %s%s has failed: %s",
3693
                 jid, self._IfName(name, " for %s"), job_result)
3694

    
3695
      results.append((idx, success, job_result))
3696

    
3697
    # sort based on the index, then drop it
3698
    results.sort()
3699
    results = [i[1:] for i in results]
3700

    
3701
    return results
3702

    
3703
  def WaitOrShow(self, wait):
3704
    """Wait for job results or only print the job IDs.
3705

3706
    @type wait: boolean
3707
    @param wait: whether to wait or not
3708

3709
    """
3710
    if wait:
3711
      return self.GetResults()
3712
    else:
3713
      if not self.jobs:
3714
        self.SubmitPending()
3715
      for _, status, result, name in self.jobs:
3716
        if status:
3717
          ToStdout("%s: %s", result, name)
3718
        else:
3719
          ToStderr("Failure for %s: %s", name, result)
3720
      return [row[1:3] for row in self.jobs]
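
# Editor's illustrative sketch (not part of the original module): batching
# several jobs through JobExecutor. The instance names are made up and
# opcodes.OpInstanceStartup is assumed to exist with an instance_name
# parameter; the helper is never called.
def _ExampleJobExecutor(opts):
  """Example only: submit one startup job per instance and wait for all."""
  jex = JobExecutor(opts=opts)
  for name in ["inst1.example.com", "inst2.example.com"]:
    jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
  # Each entry is (success, job result or error message), in queueing order
  return jex.GetResults()
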
3721

    
3722

    
3723
def FormatParamsDictInfo(param_dict, actual):
3724
  """Formats a parameter dictionary.
3725

3726
  @type param_dict: dict
3727
  @param param_dict: the own parameters
3728
  @type actual: dict
3729
  @param actual: the current parameter set (including defaults)
3730
  @rtype: dict
3731
  @return: dictionary where the value of each parameter is either a fully
3732
      formatted string or a dictionary containing formatted strings
3733

3734
  """
3735
  ret = {}
3736
  for (key, data) in actual.items():
3737
    if isinstance(data, dict) and data:
3738
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
3739
    else:
3740
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
3741
  return ret
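
# Editor's illustrative sketch (not part of the original module): values
# missing from the "own" parameter dictionary are reported as defaults. Never
# called.
def _ExampleFormatParamsDictInfo():
  """Example only: one explicitly set and one inherited parameter."""
  own = {"vcpus": 2}
  actual = {"vcpus": 2, "memory": 512}
  result = FormatParamsDictInfo(own, actual)
  assert result == {"vcpus": "2", "memory": "default (512)"}
  return result
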
3742

    
3743

    
3744
def _FormatListInfoDefault(data, def_data):
3745
  if data is not None:
3746
    ret = utils.CommaJoin(data)
3747
  else:
3748
    ret = "default (%s)" % utils.CommaJoin(def_data)
3749
  return ret
3750

    
3751

    
3752
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3753
  """Formats an instance policy.
3754

3755
  @type custom_ipolicy: dict
3756
  @param custom_ipolicy: own policy
3757
  @type eff_ipolicy: dict
3758
  @param eff_ipolicy: effective policy (including defaults); ignored for
3759
      cluster
3760
  @type iscluster: bool
3761
  @param iscluster: the policy is at cluster level
3762
  @rtype: list of pairs
3763
  @return: formatted data, suitable for L{PrintGenericInfo}
3764

3765
  """
3766
  if iscluster:
3767
    eff_ipolicy = custom_ipolicy
3768

    
3769
  minmax_out = []
3770
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3771
  if custom_minmax:
3772
    for (k, minmax) in enumerate(custom_minmax):
3773
      minmax_out.append([
3774
        ("%s/%s" % (key, k),
3775
         FormatParamsDictInfo(minmax[key], minmax[key]))
3776
        for key in constants.ISPECS_MINMAX_KEYS
3777
        ])
3778
  else:
3779
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3780
      minmax_out.append([
3781
        ("%s/%s" % (key, k),
3782
         FormatParamsDictInfo({}, minmax[key]))
3783
        for key in constants.ISPECS_MINMAX_KEYS
3784
        ])
3785
  ret = [("bounds specs", minmax_out)]
3786

    
3787
  if iscluster:
3788
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
3789
    ret.append(
3790
      (constants.ISPECS_STD,
3791
       FormatParamsDictInfo(stdspecs, stdspecs))
3792
      )
3793

    
3794
  ret.append(
3795
    ("allowed disk templates",
3796
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3797
                            eff_ipolicy[constants.IPOLICY_DTS]))
3798
    )
3799
  ret.extend([
3800
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3801
    for key in constants.IPOLICY_PARAMETERS
3802
    ])
3803
  return ret
3804

    
3805

    
3806
def _PrintSpecsParameters(buf, specs):
3807
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3808
  buf.write(",".join(values))
3809

    
3810

    
3811
def PrintIPolicyCommand(buf, ipolicy, isgroup):
3812
  """Print the command option used to generate the given instance policy.
3813

3814
  Currently only the parts dealing with specs are supported.
3815

3816
  @type buf: StringIO
3817
  @param buf: stream to write into
3818
  @type ipolicy: dict
3819
  @param ipolicy: instance policy
3820
  @type isgroup: bool
3821
  @param isgroup: whether the policy is at group level
3822

3823
  """
3824
  if not isgroup:
3825
    stdspecs = ipolicy.get("std")
3826
    if stdspecs:
3827
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3828
      _PrintSpecsParameters(buf, stdspecs)
3829
  minmaxes = ipolicy.get("minmax", [])
3830
  first = True
3831
  for minmax in minmaxes:
3832
    minspecs = minmax.get("min")
3833
    maxspecs = minmax.get("max")
3834
    if minspecs and maxspecs:
3835
      if first:
3836
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3837
        first = False
3838
      else:
3839
        buf.write("//")
3840
      buf.write("min:")
3841
      _PrintSpecsParameters(buf, minspecs)
3842
      buf.write("/max:")
3843
      _PrintSpecsParameters(buf, maxspecs)
3844

    
3845

    
3846
def ConfirmOperation(names, list_type, text, extra=""):
3847
  """Ask the user to confirm an operation on a list of list_type.
3848

3849
  This function is used to request confirmation for doing an operation
3850
  on a given list of list_type.
3851

3852
  @type names: list
3853
  @param names: the list of names that we display when
3854
      we ask for confirmation
3855
  @type list_type: str
3856
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3857
  @type text: str
3858
  @param text: the operation that the user should confirm
3859
  @rtype: boolean
3860
  @return: True or False depending on user's confirmation.
3861

3862
  """
3863
  count = len(names)
3864
  msg = ("The %s will operate on %d %s.\n%s"
3865
         "Do you want to continue?" % (text, count, list_type, extra))
3866
  affected = (("\nAffected %s:\n" % list_type) +
3867
              "\n".join(["  %s" % name for name in names]))
3868

    
3869
  choices = [("y", True, "Yes, execute the %s" % text),
3870
             ("n", False, "No, abort the %s" % text)]
3871

    
3872
  if count > 20:
3873
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3874
    question = msg
3875
  else:
3876
    question = msg + affected
3877

    
3878
  choice = AskUser(question, choices)
3879
  if choice == "v":
3880
    choices.pop(1)
3881
    choice = AskUser(msg + affected, choices)
3882
  return choice
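
# Editor's illustrative sketch (not part of the original module): a command
# typically guards a destructive bulk operation with ConfirmOperation. Never
# called; AskUser needs an interactive terminal.
def _ExampleConfirmOperation(names):
  """Example only: ask before shutting down several instances."""
  if not ConfirmOperation(names, "instances", "shutdown"):
    return constants.EXIT_FAILURE
  # ... submit the shutdown jobs here ...
  return constants.EXIT_SUCCESS
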
3883

    
3884

    
3885
def _MaybeParseUnit(elements):
3886
  """Parses and returns an array of potential values with units.
3887

3888
  """
3889
  parsed = {}
3890
  for k, v in elements.items():
3891
    if v == constants.VALUE_DEFAULT:
3892
      parsed[k] = v
3893
    else:
3894
      parsed[k] = utils.ParseUnit(v)
3895
  return parsed
3896

    
3897

    
3898
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3899
                             ispecs_disk_count, ispecs_disk_size,
3900
                             ispecs_nic_count, group_ipolicy, fill_all):
3901
  try:
3902
    if ispecs_mem_size:
3903
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3904
    if ispecs_disk_size:
3905
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3906
  except (TypeError, ValueError, errors.UnitParseError), err:
3907
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3908
                               " in policy: %s" %
3909
                               (ispecs_disk_size, ispecs_mem_size, err),
3910
                               errors.ECODE_INVAL)
3911

    
3912
  # prepare ipolicy dict
3913
  ispecs_transposed = {
3914
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3915
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3916
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3917
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3918
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3919
    }
3920

    
3921
  # first, check that the values given are correct
3922
  if group_ipolicy:
3923
    forced_type = TISPECS_GROUP_TYPES
3924
  else:
3925
    forced_type = TISPECS_CLUSTER_TYPES
3926
  for specs in ispecs_transposed.values():
3927
    assert type(specs) is dict
3928
    utils.ForceDictType(specs, forced_type)
3929

    
3930
  # then transpose
3931
  ispecs = {
3932
    constants.ISPECS_MIN: {},
3933
    constants.ISPECS_MAX: {},
3934
    constants.ISPECS_STD: {},
3935
    }
3936
  for (name, specs) in ispecs_transposed.iteritems():
3937
    assert name in constants.ISPECS_PARAMETERS
3938
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3939
      assert key in ispecs
3940
      ispecs[key][name] = val
3941
  minmax_out = {}
3942
  for key in constants.ISPECS_MINMAX_KEYS:
3943
    if fill_all:
3944
      minmax_out[key] = \
3945
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3946
    else:
3947
      minmax_out[key] = ispecs[key]
3948
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3949
  if fill_all:
3950
    ipolicy[constants.ISPECS_STD] = \
3951
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3952
                         ispecs[constants.ISPECS_STD])
3953
  else:
3954
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3955

    
3956

    
3957
def _ParseSpecUnit(spec, keyname):
3958
  ret = spec.copy()
3959
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3960
    if k in ret:
3961
      try:
3962
        ret[k] = utils.ParseUnit(ret[k])
3963
      except (TypeError, ValueError, errors.UnitParseError), err:
3964
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3965
                                    " specs: %s" % (k, ret[k], keyname, err)),
3966
                                   errors.ECODE_INVAL)
3967
  return ret
3968

    
3969

    
3970
def _ParseISpec(spec, keyname, required):
3971
  ret = _ParseSpecUnit(spec, keyname)
3972
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
3973
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
3974
  if required and missing:
3975
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
3976
                               (keyname, utils.CommaJoin(missing)),
3977
                               errors.ECODE_INVAL)
3978
  return ret
3979

    
3980

    
3981
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
3982
  ret = None
3983
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
3984
      len(minmax_ispecs[0]) == 1):
3985
    for (key, spec) in minmax_ispecs[0].items():
3986
      # This loop is executed exactly once
3987
      if key in allowed_values and not spec:
3988
        ret = key
3989
  return ret
3990

    
3991

    
3992
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
3993
                            group_ipolicy, allowed_values):
3994
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
3995
  if found_allowed is not None:
3996
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
3997
  elif minmax_ispecs is not None:
3998
    minmax_out = []
3999
    for mmpair in minmax_ispecs:
4000
      mmpair_out = {}
4001
      for (key, spec) in mmpair.items():
4002
        if key not in constants.ISPECS_MINMAX_KEYS:
4003
          msg = "Invalid key in bounds instance specifications: %s" % key
4004
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
4005
        mmpair_out[key] = _ParseISpec(spec, key, True)
4006
      minmax_out.append(mmpair_out)
4007
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4008
  if std_ispecs is not None:
4009
    assert not group_ipolicy # This is not an option for gnt-group
4010
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4011

    
4012

    
4013
def CreateIPolicyFromOpts(ispecs_mem_size=None,
4014
                          ispecs_cpu_count=None,
4015
                          ispecs_disk_count=None,
4016
                          ispecs_disk_size=None,
4017
                          ispecs_nic_count=None,
4018
                          minmax_ispecs=None,
4019
                          std_ispecs=None,
4020
                          ipolicy_disk_templates=None,
4021
                          ipolicy_vcpu_ratio=None,
4022
                          ipolicy_spindle_ratio=None,
4023
                          group_ipolicy=False,
4024
                          allowed_values=None,
4025
                          fill_all=False):
4026
  """Creation of instance policy based on command line options.
4027

4028
  @param fill_all: whether for cluster policies we should ensure that
4029
    all values are filled
4030

4031
  """
4032
  assert not (fill_all and allowed_values)
4033

    
4034
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4035
                 ispecs_disk_size or ispecs_nic_count)
4036
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4037
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4038
                               " together with any --ipolicy-xxx-specs option",
4039
                               errors.ECODE_INVAL)
4040

    
4041
  ipolicy_out = objects.MakeEmptyIPolicy()
4042
  if split_specs:
4043
    assert fill_all
4044
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4045
                             ispecs_disk_count, ispecs_disk_size,
4046
                             ispecs_nic_count, group_ipolicy, fill_all)
4047
  elif (minmax_ispecs is not None or std_ispecs is not None):
4048
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4049
                            group_ipolicy, allowed_values)
4050

    
4051
  if ipolicy_disk_templates is not None:
4052
    if allowed_values and ipolicy_disk_templates in allowed_values:
4053
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4054
    else:
4055
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4056
  if ipolicy_vcpu_ratio is not None:
4057
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4058
  if ipolicy_spindle_ratio is not None:
4059
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4060

    
4061
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4062

    
4063
  if not group_ipolicy and fill_all:
4064
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4065

    
4066
  return ipolicy_out
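
# Editor's illustrative sketch (not part of the original module): building a
# cluster-level policy where only the vcpu ratio is overridden; with
# fill_all=True every other value comes from the built-in defaults. Never
# called.
def _ExampleCreateIPolicy():
  """Example only: cluster policy with a custom vcpu ratio."""
  return CreateIPolicyFromOpts(ipolicy_vcpu_ratio=4.0, fill_all=True)
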
4067

    
4068

    
4069
def _SerializeGenericInfo(buf, data, level, afterkey=False):
4070
  """Formatting core of L{PrintGenericInfo}.
4071

4072
  @param buf: (string) stream to accumulate the result into
4073
  @param data: data to format
4074
  @type level: int
4075
  @param level: depth in the data hierarchy, used for indenting
4076
  @type afterkey: bool
4077
  @param afterkey: True when we are in the middle of a line after a key (used
4078
      to properly add newlines or indentation)
4079

4080
  """
4081
  baseind = "  "
4082
  if isinstance(data, dict):
4083
    if not data:
4084
      buf.write("\n")
4085
    else:
4086
      if afterkey:
4087
        buf.write("\n")
4088
        doindent = True
4089
      else:
4090
        doindent = False
4091
      for key in sorted(data):
4092
        if doindent:
4093
          buf.write(baseind * level)
4094
        else:
4095
          doindent = True
4096
        buf.write(key)
4097
        buf.write(": ")
4098
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4099
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4100
    # list of tuples (an ordered dictionary)
4101
    if afterkey:
4102
      buf.write("\n")
4103
      doindent = True
4104
    else:
4105
      doindent = False
4106
    for (key, val) in data:
4107
      if doindent:
4108
        buf.write(baseind * level)
4109
      else:
4110
        doindent = True
4111
      buf.write(key)
4112
      buf.write(": ")
4113
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4114
  elif isinstance(data, list):
4115
    if not data:
4116
      buf.write("\n")
4117
    else:
4118
      if afterkey:
4119
        buf.write("\n")
4120
        doindent = True
4121
      else:
4122
        doindent = False
4123
      for item in data:
4124
        if doindent:
4125
          buf.write(baseind * level)
4126
        else:
4127
          doindent = True
4128
        buf.write("-")
4129
        buf.write(baseind[1:])
4130
        _SerializeGenericInfo(buf, item, level + 1)
4131
  else:
4132
    # This branch should only be taken for strings, but it's practically
4133
    # impossible to guarantee that no other types are produced somewhere
4134
    buf.write(str(data))
4135
    buf.write("\n")
4136

    
4137

    
4138
def PrintGenericInfo(data):
4139
  """Print information formatted according to the hierarchy.
4140

4141
  The output is a valid YAML string.
4142

4143
  @param data: the data to print. It's a hierarchical structure whose elements
4144
      can be:
4145
        - dictionaries, where keys are strings and values are of any of the
4146
          types listed here
4147
        - lists of pairs (key, value), where key is a string and value is of
4148
          any of the types listed here; it's a way to encode ordered
4149
          dictionaries
4150
        - lists of any of the types listed here
4151
        - strings
4152

4153
  """
4154
  buf = StringIO()
4155
  _SerializeGenericInfo(buf, data, 0)
4156
  ToStdout(buf.getvalue().rstrip("\n"))
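
# Editor's illustrative sketch (not part of the original module): the mix of
# dictionaries, (key, value) pair lists and plain lists that PrintGenericInfo
# accepts. Never called.
def _ExamplePrintGenericInfo():
  """Example only: print a small, ordered hierarchy as YAML-like text."""
  PrintGenericInfo([
    ("cluster", {"name": "cluster1", "master": "node1.example.com"}),
    ("nodes", ["node1.example.com", "node2.example.com"]),
    ("tags", []),
    ])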