Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ 3cac836b

History | View | Annotate | Download (137.2 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
import errno
31
import itertools
32
import shlex
33
from cStringIO import StringIO
34

    
35
from ganeti import utils
36
from ganeti import errors
37
from ganeti import constants
38
from ganeti import opcodes
39
from ganeti import luxi
40
from ganeti import ssconf
41
from ganeti import rpc
42
from ganeti import ssh
43
from ganeti import compat
44
from ganeti import netutils
45
from ganeti import qlang
46
from ganeti import objects
47
from ganeti import pathutils
48

    
49
from optparse import (OptionParser, TitledHelpFormatter,
50
                      Option, OptionValueError)
51

    
52

    
53
__all__ = [
54
  # Command line options
55
  "ABSOLUTE_OPT",
56
  "ADD_UIDS_OPT",
57
  "ADD_RESERVED_IPS_OPT",
58
  "ALLOCATABLE_OPT",
59
  "ALLOC_POLICY_OPT",
60
  "ALL_OPT",
61
  "ALLOW_FAILOVER_OPT",
62
  "AUTO_PROMOTE_OPT",
63
  "AUTO_REPLACE_OPT",
64
  "BACKEND_OPT",
65
  "BLK_OS_OPT",
66
  "CAPAB_MASTER_OPT",
67
  "CAPAB_VM_OPT",
68
  "CLEANUP_OPT",
69
  "CLUSTER_DOMAIN_SECRET_OPT",
70
  "CONFIRM_OPT",
71
  "CP_SIZE_OPT",
72
  "DEBUG_OPT",
73
  "DEBUG_SIMERR_OPT",
74
  "DISKIDX_OPT",
75
  "DISK_OPT",
76
  "DISK_PARAMS_OPT",
77
  "DISK_TEMPLATE_OPT",
78
  "DRAINED_OPT",
79
  "DRY_RUN_OPT",
80
  "DRBD_HELPER_OPT",
81
  "DST_NODE_OPT",
82
  "EARLY_RELEASE_OPT",
83
  "ENABLED_HV_OPT",
84
  "ENABLED_DISK_TEMPLATES_OPT",
85
  "ERROR_CODES_OPT",
86
  "FAILURE_ONLY_OPT",
87
  "FIELDS_OPT",
88
  "FILESTORE_DIR_OPT",
89
  "FILESTORE_DRIVER_OPT",
90
  "FORCE_FILTER_OPT",
91
  "FORCE_OPT",
92
  "FORCE_VARIANT_OPT",
93
  "GATEWAY_OPT",
94
  "GATEWAY6_OPT",
95
  "GLOBAL_FILEDIR_OPT",
96
  "HID_OS_OPT",
97
  "GLOBAL_SHARED_FILEDIR_OPT",
98
  "HOTPLUG_OPT",
99
  "HOTPLUG_IF_POSSIBLE_OPT",
100
  "HVLIST_OPT",
101
  "HVOPTS_OPT",
102
  "HYPERVISOR_OPT",
103
  "IALLOCATOR_OPT",
104
  "DEFAULT_IALLOCATOR_OPT",
105
  "IDENTIFY_DEFAULTS_OPT",
106
  "IGNORE_CONSIST_OPT",
107
  "IGNORE_ERRORS_OPT",
108
  "IGNORE_FAILURES_OPT",
109
  "IGNORE_OFFLINE_OPT",
110
  "IGNORE_REMOVE_FAILURES_OPT",
111
  "IGNORE_SECONDARIES_OPT",
112
  "IGNORE_SIZE_OPT",
113
  "INCLUDEDEFAULTS_OPT",
114
  "INTERVAL_OPT",
115
  "MAC_PREFIX_OPT",
116
  "MAINTAIN_NODE_HEALTH_OPT",
117
  "MASTER_NETDEV_OPT",
118
  "MASTER_NETMASK_OPT",
119
  "MC_OPT",
120
  "MIGRATION_MODE_OPT",
121
  "MODIFY_ETCHOSTS_OPT",
122
  "NET_OPT",
123
  "NETWORK_OPT",
124
  "NETWORK6_OPT",
125
  "NEW_CLUSTER_CERT_OPT",
126
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
127
  "NEW_CONFD_HMAC_KEY_OPT",
128
  "NEW_RAPI_CERT_OPT",
129
  "NEW_PRIMARY_OPT",
130
  "NEW_SECONDARY_OPT",
131
  "NEW_SPICE_CERT_OPT",
132
  "NIC_PARAMS_OPT",
133
  "NOCONFLICTSCHECK_OPT",
134
  "NODE_FORCE_JOIN_OPT",
135
  "NODE_LIST_OPT",
136
  "NODE_PLACEMENT_OPT",
137
  "NODEGROUP_OPT",
138
  "NODE_PARAMS_OPT",
139
  "NODE_POWERED_OPT",
140
  "NOHDR_OPT",
141
  "NOIPCHECK_OPT",
142
  "NO_INSTALL_OPT",
143
  "NONAMECHECK_OPT",
144
  "NOLVM_STORAGE_OPT",
145
  "NOMODIFY_ETCHOSTS_OPT",
146
  "NOMODIFY_SSH_SETUP_OPT",
147
  "NONICS_OPT",
148
  "NONLIVE_OPT",
149
  "NONPLUS1_OPT",
150
  "NORUNTIME_CHGS_OPT",
151
  "NOSHUTDOWN_OPT",
152
  "NOSTART_OPT",
153
  "NOSSH_KEYCHECK_OPT",
154
  "NOVOTING_OPT",
155
  "NO_REMEMBER_OPT",
156
  "NWSYNC_OPT",
157
  "OFFLINE_INST_OPT",
158
  "ONLINE_INST_OPT",
159
  "ON_PRIMARY_OPT",
160
  "ON_SECONDARY_OPT",
161
  "OFFLINE_OPT",
162
  "OSPARAMS_OPT",
163
  "OS_OPT",
164
  "OS_SIZE_OPT",
165
  "OOB_TIMEOUT_OPT",
166
  "POWER_DELAY_OPT",
167
  "PREALLOC_WIPE_DISKS_OPT",
168
  "PRIMARY_IP_VERSION_OPT",
169
  "PRIMARY_ONLY_OPT",
170
  "PRINT_JOBID_OPT",
171
  "PRIORITY_OPT",
172
  "RAPI_CERT_OPT",
173
  "READD_OPT",
174
  "REASON_OPT",
175
  "REBOOT_TYPE_OPT",
176
  "REMOVE_INSTANCE_OPT",
177
  "REMOVE_RESERVED_IPS_OPT",
178
  "REMOVE_UIDS_OPT",
179
  "RESERVED_LVS_OPT",
180
  "RUNTIME_MEM_OPT",
181
  "ROMAN_OPT",
182
  "SECONDARY_IP_OPT",
183
  "SECONDARY_ONLY_OPT",
184
  "SELECT_OS_OPT",
185
  "SEP_OPT",
186
  "SHOWCMD_OPT",
187
  "SHOW_MACHINE_OPT",
188
  "SHUTDOWN_TIMEOUT_OPT",
189
  "SINGLE_NODE_OPT",
190
  "SPECS_CPU_COUNT_OPT",
191
  "SPECS_DISK_COUNT_OPT",
192
  "SPECS_DISK_SIZE_OPT",
193
  "SPECS_MEM_SIZE_OPT",
194
  "SPECS_NIC_COUNT_OPT",
195
  "SPLIT_ISPECS_OPTS",
196
  "IPOLICY_STD_SPECS_OPT",
197
  "IPOLICY_DISK_TEMPLATES",
198
  "IPOLICY_VCPU_RATIO",
199
  "SPICE_CACERT_OPT",
200
  "SPICE_CERT_OPT",
201
  "SRC_DIR_OPT",
202
  "SRC_NODE_OPT",
203
  "SUBMIT_OPT",
204
  "SUBMIT_OPTS",
205
  "STARTUP_PAUSED_OPT",
206
  "STATIC_OPT",
207
  "SYNC_OPT",
208
  "TAG_ADD_OPT",
209
  "TAG_SRC_OPT",
210
  "TIMEOUT_OPT",
211
  "TO_GROUP_OPT",
212
  "UIDPOOL_OPT",
213
  "USEUNITS_OPT",
214
  "USE_EXTERNAL_MIP_SCRIPT",
215
  "USE_REPL_NET_OPT",
216
  "VERBOSE_OPT",
217
  "VG_NAME_OPT",
218
  "WFSYNC_OPT",
219
  "YES_DOIT_OPT",
220
  "DISK_STATE_OPT",
221
  "HV_STATE_OPT",
222
  "IGNORE_IPOLICY_OPT",
223
  "INSTANCE_POLICY_OPTS",
224
  # Generic functions for CLI programs
225
  "ConfirmOperation",
226
  "CreateIPolicyFromOpts",
227
  "GenericMain",
228
  "GenericInstanceCreate",
229
  "GenericList",
230
  "GenericListFields",
231
  "GetClient",
232
  "GetOnlineNodes",
233
  "JobExecutor",
234
  "JobSubmittedException",
235
  "ParseTimespec",
236
  "RunWhileClusterStopped",
237
  "SubmitOpCode",
238
  "SubmitOpCodeToDrainedQueue",
239
  "SubmitOrSend",
240
  "UsesRPC",
241
  # Formatting functions
242
  "ToStderr", "ToStdout",
243
  "FormatError",
244
  "FormatQueryResult",
245
  "FormatParamsDictInfo",
246
  "FormatPolicyInfo",
247
  "PrintIPolicyCommand",
248
  "PrintGenericInfo",
249
  "GenerateTable",
250
  "AskUser",
251
  "FormatTimestamp",
252
  "FormatLogMessage",
253
  # Tags functions
254
  "ListTags",
255
  "AddTags",
256
  "RemoveTags",
257
  # command line options support infrastructure
258
  "ARGS_MANY_INSTANCES",
259
  "ARGS_MANY_NODES",
260
  "ARGS_MANY_GROUPS",
261
  "ARGS_MANY_NETWORKS",
262
  "ARGS_NONE",
263
  "ARGS_ONE_INSTANCE",
264
  "ARGS_ONE_NODE",
265
  "ARGS_ONE_GROUP",
266
  "ARGS_ONE_OS",
267
  "ARGS_ONE_NETWORK",
268
  "ArgChoice",
269
  "ArgCommand",
270
  "ArgFile",
271
  "ArgGroup",
272
  "ArgHost",
273
  "ArgInstance",
274
  "ArgJobId",
275
  "ArgNetwork",
276
  "ArgNode",
277
  "ArgOs",
278
  "ArgExtStorage",
279
  "ArgSuggest",
280
  "ArgUnknown",
281
  "OPT_COMPL_INST_ADD_NODES",
282
  "OPT_COMPL_MANY_NODES",
283
  "OPT_COMPL_ONE_IALLOCATOR",
284
  "OPT_COMPL_ONE_INSTANCE",
285
  "OPT_COMPL_ONE_NODE",
286
  "OPT_COMPL_ONE_NODEGROUP",
287
  "OPT_COMPL_ONE_NETWORK",
288
  "OPT_COMPL_ONE_OS",
289
  "OPT_COMPL_ONE_EXTSTORAGE",
290
  "cli_option",
291
  "SplitNodeOption",
292
  "CalculateOSNames",
293
  "ParseFields",
294
  "COMMON_CREATE_OPTS",
295
  ]
296

    
297
NO_PREFIX = "no_"
298
UN_PREFIX = "-"
299

    
300
#: Priorities (sorted)
301
_PRIORITY_NAMES = [
302
  ("low", constants.OP_PRIO_LOW),
303
  ("normal", constants.OP_PRIO_NORMAL),
304
  ("high", constants.OP_PRIO_HIGH),
305
  ]
306

    
307
#: Priority dictionary for easier lookup
308
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
309
# we migrate to Python 2.6
310
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
311

    
312
# Query result status for clients
313
(QR_NORMAL,
314
 QR_UNKNOWN,
315
 QR_INCOMPLETE) = range(3)
316

    
317
#: Maximum batch size for ChooseJob
318
_CHOOSE_BATCH = 25
319

    
320

    
321
# constants used to create InstancePolicy dictionary
322
TISPECS_GROUP_TYPES = {
323
  constants.ISPECS_MIN: constants.VTYPE_INT,
324
  constants.ISPECS_MAX: constants.VTYPE_INT,
325
  }
326

    
327
TISPECS_CLUSTER_TYPES = {
328
  constants.ISPECS_MIN: constants.VTYPE_INT,
329
  constants.ISPECS_MAX: constants.VTYPE_INT,
330
  constants.ISPECS_STD: constants.VTYPE_INT,
331
  }
332

    
333
#: User-friendly names for query2 field types
334
_QFT_NAMES = {
335
  constants.QFT_UNKNOWN: "Unknown",
336
  constants.QFT_TEXT: "Text",
337
  constants.QFT_BOOL: "Boolean",
338
  constants.QFT_NUMBER: "Number",
339
  constants.QFT_UNIT: "Storage size",
340
  constants.QFT_TIMESTAMP: "Timestamp",
341
  constants.QFT_OTHER: "Custom",
342
  }
343

    
344

    
345
class _Argument:
346
  def __init__(self, min=0, max=None): # pylint: disable=W0622
347
    self.min = min
348
    self.max = max
349

    
350
  def __repr__(self):
351
    return ("<%s min=%s max=%s>" %
352
            (self.__class__.__name__, self.min, self.max))
353

    
354

    
355
class ArgSuggest(_Argument):
356
  """Suggesting argument.
357

358
  Value can be any of the ones passed to the constructor.
359

360
  """
361
  # pylint: disable=W0622
362
  def __init__(self, min=0, max=None, choices=None):
363
    _Argument.__init__(self, min=min, max=max)
364
    self.choices = choices
365

    
366
  def __repr__(self):
367
    return ("<%s min=%s max=%s choices=%r>" %
368
            (self.__class__.__name__, self.min, self.max, self.choices))
369

    
370

    
371
class ArgChoice(ArgSuggest):
372
  """Choice argument.
373

374
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
375
  but value must be one of the choices.
376

377
  """
378

    
379

    
380
class ArgUnknown(_Argument):
381
  """Unknown argument to program (e.g. determined at runtime).
382

383
  """
384

    
385

    
386
class ArgInstance(_Argument):
387
  """Instances argument.
388

389
  """
390

    
391

    
392
class ArgNode(_Argument):
393
  """Node argument.
394

395
  """
396

    
397

    
398
class ArgNetwork(_Argument):
399
  """Network argument.
400

401
  """
402

    
403

    
404
class ArgGroup(_Argument):
405
  """Node group argument.
406

407
  """
408

    
409

    
410
class ArgJobId(_Argument):
411
  """Job ID argument.
412

413
  """
414

    
415

    
416
class ArgFile(_Argument):
417
  """File path argument.
418

419
  """
420

    
421

    
422
class ArgCommand(_Argument):
423
  """Command argument.
424

425
  """
426

    
427

    
428
class ArgHost(_Argument):
429
  """Host argument.
430

431
  """
432

    
433

    
434
class ArgOs(_Argument):
435
  """OS argument.
436

437
  """
438

    
439

    
440
class ArgExtStorage(_Argument):
441
  """ExtStorage argument.
442

443
  """
444

    
445

    
446
ARGS_NONE = []
447
ARGS_MANY_INSTANCES = [ArgInstance()]
448
ARGS_MANY_NETWORKS = [ArgNetwork()]
449
ARGS_MANY_NODES = [ArgNode()]
450
ARGS_MANY_GROUPS = [ArgGroup()]
451
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
452
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
453
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
454
# TODO
455
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
456
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
457

    
458

    
459
def _ExtractTagsObject(opts, args):
460
  """Extract the tag type object.
461

462
  Note that this function will modify its args parameter.
463

464
  """
465
  if not hasattr(opts, "tag_type"):
466
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
467
  kind = opts.tag_type
468
  if kind == constants.TAG_CLUSTER:
469
    retval = kind, ""
470
  elif kind in (constants.TAG_NODEGROUP,
471
                constants.TAG_NODE,
472
                constants.TAG_NETWORK,
473
                constants.TAG_INSTANCE):
474
    if not args:
475
      raise errors.OpPrereqError("no arguments passed to the command",
476
                                 errors.ECODE_INVAL)
477
    name = args.pop(0)
478
    retval = kind, name
479
  else:
480
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
481
  return retval
482

    
483

    
484
def _ExtendTags(opts, args):
485
  """Extend the args if a source file has been given.
486

487
  This function will extend the tags with the contents of the file
488
  passed in the 'tags_source' attribute of the opts parameter. A file
489
  named '-' will be replaced by stdin.
490

491
  """
492
  fname = opts.tags_source
493
  if fname is None:
494
    return
495
  if fname == "-":
496
    new_fh = sys.stdin
497
  else:
498
    new_fh = open(fname, "r")
499
  new_data = []
500
  try:
501
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
502
    # because of python bug 1633941
503
    while True:
504
      line = new_fh.readline()
505
      if not line:
506
        break
507
      new_data.append(line.strip())
508
  finally:
509
    new_fh.close()
510
  args.extend(new_data)
511

    
512

    
513
def ListTags(opts, args):
514
  """List the tags on a given object.
515

516
  This is a generic implementation that knows how to deal with all
517
  three cases of tag objects (cluster, node, instance). The opts
518
  argument is expected to contain a tag_type field denoting what
519
  object type we work on.
520

521
  """
522
  kind, name = _ExtractTagsObject(opts, args)
523
  cl = GetClient(query=True)
524
  result = cl.QueryTags(kind, name)
525
  result = list(result)
526
  result.sort()
527
  for tag in result:
528
    ToStdout(tag)
529

    
530

    
531
def AddTags(opts, args):
532
  """Add tags on a given object.
533

534
  This is a generic implementation that knows how to deal with all
535
  three cases of tag objects (cluster, node, instance). The opts
536
  argument is expected to contain a tag_type field denoting what
537
  object type we work on.
538

539
  """
540
  kind, name = _ExtractTagsObject(opts, args)
541
  _ExtendTags(opts, args)
542
  if not args:
543
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
544
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
545
  SubmitOrSend(op, opts)
546

    
547

    
548
def RemoveTags(opts, args):
549
  """Remove tags from a given object.
550

551
  This is a generic implementation that knows how to deal with all
552
  three cases of tag objects (cluster, node, instance). The opts
553
  argument is expected to contain a tag_type field denoting what
554
  object type we work on.
555

556
  """
557
  kind, name = _ExtractTagsObject(opts, args)
558
  _ExtendTags(opts, args)
559
  if not args:
560
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
561
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
562
  SubmitOrSend(op, opts)
563

    
564

    
565
def check_unit(option, opt, value): # pylint: disable=W0613
566
  """OptParsers custom converter for units.
567

568
  """
569
  try:
570
    return utils.ParseUnit(value)
571
  except errors.UnitParseError, err:
572
    raise OptionValueError("option %s: %s" % (opt, err))
573

    
574

    
575
def _SplitKeyVal(opt, data, parse_prefixes):
576
  """Convert a KeyVal string into a dict.
577

578
  This function will convert a key=val[,...] string into a dict. Empty
579
  values will be converted specially: keys which have the prefix 'no_'
580
  will have the value=False and the prefix stripped, keys with the prefix
581
  "-" will have value=None and the prefix stripped, and the others will
582
  have value=True.
583

584
  @type opt: string
585
  @param opt: a string holding the option name for which we process the
586
      data, used in building error messages
587
  @type data: string
588
  @param data: a string of the format key=val,key=val,...
589
  @type parse_prefixes: bool
590
  @param parse_prefixes: whether to handle prefixes specially
591
  @rtype: dict
592
  @return: {key=val, key=val}
593
  @raises errors.ParameterError: if there are duplicate keys
594

595
  """
596
  kv_dict = {}
597
  if data:
598
    for elem in utils.UnescapeAndSplit(data, sep=","):
599
      if "=" in elem:
600
        key, val = elem.split("=", 1)
601
      elif parse_prefixes:
602
        if elem.startswith(NO_PREFIX):
603
          key, val = elem[len(NO_PREFIX):], False
604
        elif elem.startswith(UN_PREFIX):
605
          key, val = elem[len(UN_PREFIX):], None
606
        else:
607
          key, val = elem, True
608
      else:
609
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
610
                                    (elem, opt))
611
      if key in kv_dict:
612
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
613
                                    (key, opt))
614
      kv_dict[key] = val
615
  return kv_dict
616

    
617

    
618
def _SplitIdentKeyVal(opt, value, parse_prefixes):
619
  """Helper function to parse "ident:key=val,key=val" options.
620

621
  @type opt: string
622
  @param opt: option name, used in error messages
623
  @type value: string
624
  @param value: expected to be in the format "ident:key=val,key=val,..."
625
  @type parse_prefixes: bool
626
  @param parse_prefixes: whether to handle prefixes specially (see
627
      L{_SplitKeyVal})
628
  @rtype: tuple
629
  @return: (ident, {key=val, key=val})
630
  @raises errors.ParameterError: in case of duplicates or other parsing errors
631

632
  """
633
  if ":" not in value:
634
    ident, rest = value, ""
635
  else:
636
    ident, rest = value.split(":", 1)
637

    
638
  if parse_prefixes and ident.startswith(NO_PREFIX):
639
    if rest:
640
      msg = "Cannot pass options when removing parameter groups: %s" % value
641
      raise errors.ParameterError(msg)
642
    retval = (ident[len(NO_PREFIX):], False)
643
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
644
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
645
    if rest:
646
      msg = "Cannot pass options when removing parameter groups: %s" % value
647
      raise errors.ParameterError(msg)
648
    retval = (ident[len(UN_PREFIX):], None)
649
  else:
650
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
651
    retval = (ident, kv_dict)
652
  return retval
653

    
654

    
655
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
656
  """Custom parser for ident:key=val,key=val options.
657

658
  This will store the parsed values as a tuple (ident, {key: val}). As such,
659
  multiple uses of this option via action=append is possible.
660

661
  """
662
  return _SplitIdentKeyVal(opt, value, True)
663

    
664

    
665
def check_key_val(option, opt, value):  # pylint: disable=W0613
666
  """Custom parser class for key=val,key=val options.
667

668
  This will store the parsed values as a dict {key: val}.
669

670
  """
671
  return _SplitKeyVal(opt, value, True)
672

    
673

    
674
def _SplitListKeyVal(opt, value):
675
  retval = {}
676
  for elem in value.split("/"):
677
    if not elem:
678
      raise errors.ParameterError("Empty section in option '%s'" % opt)
679
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
680
    if ident in retval:
681
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
682
             (ident, opt, elem))
683
      raise errors.ParameterError(msg)
684
    retval[ident] = valdict
685
  return retval
686

    
687

    
688
def check_multilist_ident_key_val(_, opt, value):
689
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
690

691
  @rtype: list of dictionary
692
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
693

694
  """
695
  retval = []
696
  for line in value.split("//"):
697
    retval.append(_SplitListKeyVal(opt, line))
698
  return retval
699

    
700

    
701
def check_bool(option, opt, value): # pylint: disable=W0613
702
  """Custom parser for yes/no options.
703

704
  This will store the parsed value as either True or False.
705

706
  """
707
  value = value.lower()
708
  if value == constants.VALUE_FALSE or value == "no":
709
    return False
710
  elif value == constants.VALUE_TRUE or value == "yes":
711
    return True
712
  else:
713
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
714

    
715

    
716
def check_list(option, opt, value): # pylint: disable=W0613
717
  """Custom parser for comma-separated lists.
718

719
  """
720
  # we have to make this explicit check since "".split(",") is [""],
721
  # not an empty list :(
722
  if not value:
723
    return []
724
  else:
725
    return utils.UnescapeAndSplit(value)
726

    
727

    
728
def check_maybefloat(option, opt, value): # pylint: disable=W0613
729
  """Custom parser for float numbers which might be also defaults.
730

731
  """
732
  value = value.lower()
733

    
734
  if value == constants.VALUE_DEFAULT:
735
    return value
736
  else:
737
    return float(value)
738

    
739

    
740
# completion_suggestion is normally a list. Using numeric values not evaluating
741
# to False for dynamic completion.
742
(OPT_COMPL_MANY_NODES,
743
 OPT_COMPL_ONE_NODE,
744
 OPT_COMPL_ONE_INSTANCE,
745
 OPT_COMPL_ONE_OS,
746
 OPT_COMPL_ONE_EXTSTORAGE,
747
 OPT_COMPL_ONE_IALLOCATOR,
748
 OPT_COMPL_ONE_NETWORK,
749
 OPT_COMPL_INST_ADD_NODES,
750
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
751

    
752
OPT_COMPL_ALL = compat.UniqueFrozenset([
753
  OPT_COMPL_MANY_NODES,
754
  OPT_COMPL_ONE_NODE,
755
  OPT_COMPL_ONE_INSTANCE,
756
  OPT_COMPL_ONE_OS,
757
  OPT_COMPL_ONE_EXTSTORAGE,
758
  OPT_COMPL_ONE_IALLOCATOR,
759
  OPT_COMPL_ONE_NETWORK,
760
  OPT_COMPL_INST_ADD_NODES,
761
  OPT_COMPL_ONE_NODEGROUP,
762
  ])
763

    
764

    
765
class CliOption(Option):
766
  """Custom option class for optparse.
767

768
  """
769
  ATTRS = Option.ATTRS + [
770
    "completion_suggest",
771
    ]
772
  TYPES = Option.TYPES + (
773
    "multilistidentkeyval",
774
    "identkeyval",
775
    "keyval",
776
    "unit",
777
    "bool",
778
    "list",
779
    "maybefloat",
780
    )
781
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
782
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
783
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
784
  TYPE_CHECKER["keyval"] = check_key_val
785
  TYPE_CHECKER["unit"] = check_unit
786
  TYPE_CHECKER["bool"] = check_bool
787
  TYPE_CHECKER["list"] = check_list
788
  TYPE_CHECKER["maybefloat"] = check_maybefloat
789

    
790

    
791
# optparse.py sets make_option, so we do it for our own option class, too
792
cli_option = CliOption
793

    
794

    
795
_YORNO = "yes|no"
796

    
797
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
798
                       help="Increase debugging level")
799

    
800
NOHDR_OPT = cli_option("--no-headers", default=False,
801
                       action="store_true", dest="no_headers",
802
                       help="Don't display column headers")
803

    
804
SEP_OPT = cli_option("--separator", default=None,
805
                     action="store", dest="separator",
806
                     help=("Separator between output fields"
807
                           " (defaults to one space)"))
808

    
809
USEUNITS_OPT = cli_option("--units", default=None,
810
                          dest="units", choices=("h", "m", "g", "t"),
811
                          help="Specify units for output (one of h/m/g/t)")
812

    
813
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
814
                        type="string", metavar="FIELDS",
815
                        help="Comma separated list of output fields")
816

    
817
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
818
                       default=False, help="Force the operation")
819

    
820
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
821
                         default=False, help="Do not require confirmation")
822

    
823
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
824
                                  action="store_true", default=False,
825
                                  help=("Ignore offline nodes and do as much"
826
                                        " as possible"))
827

    
828
TAG_ADD_OPT = cli_option("--tags", dest="tags",
829
                         default=None, help="Comma-separated list of instance"
830
                                            " tags")
831

    
832
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
833
                         default=None, help="File with tag names")
834

    
835
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
836
                        default=False, action="store_true",
837
                        help=("Submit the job and return the job ID, but"
838
                              " don't wait for the job to finish"))
839

    
840
PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
841
                             default=False, action="store_true",
842
                             help=("Additionally print the job as first line"
843
                                   " on stdout (for scripting)."))
844

    
845
SYNC_OPT = cli_option("--sync", dest="do_locking",
846
                      default=False, action="store_true",
847
                      help=("Grab locks while doing the queries"
848
                            " in order to ensure more consistent results"))
849

    
850
DRY_RUN_OPT = cli_option("--dry-run", default=False,
851
                         action="store_true",
852
                         help=("Do not execute the operation, just run the"
853
                               " check steps and verify if it could be"
854
                               " executed"))
855

    
856
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
857
                         action="store_true",
858
                         help="Increase the verbosity of the operation")
859

    
860
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
861
                              action="store_true", dest="simulate_errors",
862
                              help="Debugging option that makes the operation"
863
                              " treat most runtime checks as failed")
864

    
865
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
866
                        default=True, action="store_false",
867
                        help="Don't wait for sync (DANGEROUS!)")
868

    
869
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
870
                        default=False, action="store_true",
871
                        help="Wait for disks to sync")
872

    
873
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
874
                             action="store_true", default=False,
875
                             help="Enable offline instance")
876

    
877
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
878
                              action="store_true", default=False,
879
                              help="Disable down instance")
880

    
881
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
882
                               help=("Custom disk setup (%s)" %
883
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
884
                               default=None, metavar="TEMPL",
885
                               choices=list(constants.DISK_TEMPLATES))
886

    
887
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
888
                        help="Do not create any network cards for"
889
                        " the instance")
890

    
891
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
892
                               help="Relative path under default cluster-wide"
893
                               " file storage dir to store file-based disks",
894
                               default=None, metavar="<DIR>")
895

    
896
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
897
                                  help="Driver to use for image files",
898
                                  default=None, metavar="<DRIVER>",
899
                                  choices=list(constants.FILE_DRIVER))
900

    
901
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
902
                            help="Select nodes for the instance automatically"
903
                            " using the <NAME> iallocator plugin",
904
                            default=None, type="string",
905
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
906

    
907
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
908
                                    metavar="<NAME>",
909
                                    help="Set the default instance"
910
                                    " allocator plugin",
911
                                    default=None, type="string",
912
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
913

    
914
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
915
                    metavar="<os>",
916
                    completion_suggest=OPT_COMPL_ONE_OS)
917

    
918
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
919
                          type="keyval", default={},
920
                          help="OS parameters")
921

    
922
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
923
                               action="store_true", default=False,
924
                               help="Force an unknown variant")
925

    
926
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
927
                            action="store_true", default=False,
928
                            help="Do not install the OS (will"
929
                            " enable no-start)")
930

    
931
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
932
                                dest="allow_runtime_chgs",
933
                                default=True, action="store_false",
934
                                help="Don't allow runtime changes")
935

    
936
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
937
                         type="keyval", default={},
938
                         help="Backend parameters")
939

    
940
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
941
                        default={}, dest="hvparams",
942
                        help="Hypervisor parameters")
943

    
944
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
945
                             help="Disk template parameters, in the format"
946
                             " template:option=value,option=value,...",
947
                             type="identkeyval", action="append", default=[])
948

    
949
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
950
                                 type="keyval", default={},
951
                                 help="Memory size specs: list of key=value,"
952
                                " where key is one of min, max, std"
953
                                 " (in MB or using a unit)")
954

    
955
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
956
                                 type="keyval", default={},
957
                                 help="CPU count specs: list of key=value,"
958
                                 " where key is one of min, max, std")
959

    
960
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
961
                                  dest="ispecs_disk_count",
962
                                  type="keyval", default={},
963
                                  help="Disk count specs: list of key=value,"
964
                                  " where key is one of min, max, std")
965

    
966
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
967
                                 type="keyval", default={},
968
                                 help="Disk size specs: list of key=value,"
969
                                 " where key is one of min, max, std"
970
                                 " (in MB or using a unit)")
971

    
972
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
973
                                 type="keyval", default={},
974
                                 help="NIC count specs: list of key=value,"
975
                                 " where key is one of min, max, std")
976

    
977
IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
978
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
979
                                      dest="ipolicy_bounds_specs",
980
                                      type="multilistidentkeyval", default=None,
981
                                      help="Complete instance specs limits")
982

    
983
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
984
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
985
                                   dest="ipolicy_std_specs",
986
                                   type="keyval", default=None,
987
                                   help="Complte standard instance specs")
988

    
989
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
990
                                    dest="ipolicy_disk_templates",
991
                                    type="list", default=None,
992
                                    help="Comma-separated list of"
993
                                    " enabled disk templates")
994

    
995
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
996
                                 dest="ipolicy_vcpu_ratio",
997
                                 type="maybefloat", default=None,
998
                                 help="The maximum allowed vcpu-to-cpu ratio")
999

    
1000
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
1001
                                   dest="ipolicy_spindle_ratio",
1002
                                   type="maybefloat", default=None,
1003
                                   help=("The maximum allowed instances to"
1004
                                         " spindle ratio"))
1005

    
1006
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
1007
                            help="Hypervisor and hypervisor options, in the"
1008
                            " format hypervisor:option=value,option=value,...",
1009
                            default=None, type="identkeyval")
1010

    
1011
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1012
                        help="Hypervisor and hypervisor options, in the"
1013
                        " format hypervisor:option=value,option=value,...",
1014
                        default=[], action="append", type="identkeyval")
1015

    
1016
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1017
                           action="store_false",
1018
                           help="Don't check that the instance's IP"
1019
                           " is alive")
1020

    
1021
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1022
                             default=True, action="store_false",
1023
                             help="Don't check that the instance's name"
1024
                             " is resolvable")
1025

    
1026
NET_OPT = cli_option("--net",
1027
                     help="NIC parameters", default=[],
1028
                     dest="nics", action="append", type="identkeyval")
1029

    
1030
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1031
                      dest="disks", action="append", type="identkeyval")
1032

    
1033
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1034
                         help="Comma-separated list of disks"
1035
                         " indices to act on (e.g. 0,2) (optional,"
1036
                         " defaults to all disks)")
1037

    
1038
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1039
                         help="Enforces a single-disk configuration using the"
1040
                         " given disk size, in MiB unless a suffix is used",
1041
                         default=None, type="unit", metavar="<size>")
1042

    
1043
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1044
                                dest="ignore_consistency",
1045
                                action="store_true", default=False,
1046
                                help="Ignore the consistency of the disks on"
1047
                                " the secondary")
1048

    
1049
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1050
                                dest="allow_failover",
1051
                                action="store_true", default=False,
1052
                                help="If migration is not possible fallback to"
1053
                                     " failover")
1054

    
1055
NONLIVE_OPT = cli_option("--non-live", dest="live",
1056
                         default=True, action="store_false",
1057
                         help="Do a non-live migration (this usually means"
1058
                         " freeze the instance, save the state, transfer and"
1059
                         " only then resume running on the secondary node)")
1060

    
1061
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1062
                                default=None,
1063
                                choices=list(constants.HT_MIGRATION_MODES),
1064
                                help="Override default migration mode (choose"
1065
                                " either live or non-live")
1066

    
1067
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1068
                                help="Target node and optional secondary node",
1069
                                metavar="<pnode>[:<snode>]",
1070
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
1071

    
1072
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1073
                           action="append", metavar="<node>",
1074
                           help="Use only this node (can be used multiple"
1075
                           " times, if not given defaults to all nodes)",
1076
                           completion_suggest=OPT_COMPL_ONE_NODE)
1077

    
1078
NODEGROUP_OPT_NAME = "--node-group"
1079
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1080
                           dest="nodegroup",
1081
                           help="Node group (name or uuid)",
1082
                           metavar="<nodegroup>",
1083
                           default=None, type="string",
1084
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1085

    
1086
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1087
                             metavar="<node>",
1088
                             completion_suggest=OPT_COMPL_ONE_NODE)
1089

    
1090
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1091
                         action="store_false",
1092
                         help="Don't start the instance after creation")
1093

    
1094
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1095
                         action="store_true", default=False,
1096
                         help="Show command instead of executing it")
1097

    
1098
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1099
                         default=False, action="store_true",
1100
                         help="Instead of performing the migration/failover,"
1101
                         " try to recover from a failed cleanup. This is safe"
1102
                         " to run even if the instance is healthy, but it"
1103
                         " will create extra replication traffic and "
1104
                         " disrupt briefly the replication (like during the"
1105
                         " migration/failover")
1106

    
1107
STATIC_OPT = cli_option("-s", "--static", dest="static",
1108
                        action="store_true", default=False,
1109
                        help="Only show configuration data, not runtime data")
1110

    
1111
ALL_OPT = cli_option("--all", dest="show_all",
1112
                     default=False, action="store_true",
1113
                     help="Show info on all instances on the cluster."
1114
                     " This can take a long time to run, use wisely")
1115

    
1116
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1117
                           action="store_true", default=False,
1118
                           help="Interactive OS reinstall, lists available"
1119
                           " OS templates for selection")
1120

    
1121
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1122
                                 action="store_true", default=False,
1123
                                 help="Remove the instance from the cluster"
1124
                                 " configuration even if there are failures"
1125
                                 " during the removal process")
1126

    
1127
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1128
                                        dest="ignore_remove_failures",
1129
                                        action="store_true", default=False,
1130
                                        help="Remove the instance from the"
1131
                                        " cluster configuration even if there"
1132
                                        " are failures during the removal"
1133
                                        " process")
1134

    
1135
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1136
                                 action="store_true", default=False,
1137
                                 help="Remove the instance from the cluster")
1138

    
1139
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1140
                               help="Specifies the new node for the instance",
1141
                               metavar="NODE", default=None,
1142
                               completion_suggest=OPT_COMPL_ONE_NODE)
1143

    
1144
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1145
                               help="Specifies the new secondary node",
1146
                               metavar="NODE", default=None,
1147
                               completion_suggest=OPT_COMPL_ONE_NODE)
1148

    
1149
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1150
                             help="Specifies the new primary node",
1151
                             metavar="<node>", default=None,
1152
                             completion_suggest=OPT_COMPL_ONE_NODE)
1153

    
1154
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1155
                            default=False, action="store_true",
1156
                            help="Replace the disk(s) on the primary"
1157
                                 " node (applies only to internally mirrored"
1158
                                 " disk templates, e.g. %s)" %
1159
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1160

    
1161
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1162
                              default=False, action="store_true",
1163
                              help="Replace the disk(s) on the secondary"
1164
                                   " node (applies only to internally mirrored"
1165
                                   " disk templates, e.g. %s)" %
1166
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1167

    
1168
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1169
                              default=False, action="store_true",
1170
                              help="Lock all nodes and auto-promote as needed"
1171
                              " to MC status")
1172

    
1173
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1174
                              default=False, action="store_true",
1175
                              help="Automatically replace faulty disks"
1176
                                   " (applies only to internally mirrored"
1177
                                   " disk templates, e.g. %s)" %
1178
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1179

    
1180
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1181
                             default=False, action="store_true",
1182
                             help="Ignore current recorded size"
1183
                             " (useful for forcing activation when"
1184
                             " the recorded size is wrong)")
1185

    
1186
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1187
                          metavar="<node>",
1188
                          completion_suggest=OPT_COMPL_ONE_NODE)
1189

    
1190
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1191
                         metavar="<dir>")
1192

    
1193
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1194
                              help="Specify the secondary ip for the node",
1195
                              metavar="ADDRESS", default=None)
1196

    
1197
READD_OPT = cli_option("--readd", dest="readd",
1198
                       default=False, action="store_true",
1199
                       help="Readd old node after replacing it")
1200

    
1201
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1202
                                default=True, action="store_false",
1203
                                help="Disable SSH key fingerprint checking")
1204

    
1205
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1206
                                 default=False, action="store_true",
1207
                                 help="Force the joining of a node")
1208

    
1209
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1210
                    type="bool", default=None, metavar=_YORNO,
1211
                    help="Set the master_candidate flag on the node")
1212

    
1213
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1214
                         type="bool", default=None,
1215
                         help=("Set the offline flag on the node"
1216
                               " (cluster does not communicate with offline"
1217
                               " nodes)"))
1218

    
1219
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1220
                         type="bool", default=None,
1221
                         help=("Set the drained flag on the node"
1222
                               " (excluded from allocation operations)"))
1223

    
1224
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1225
                              type="bool", default=None, metavar=_YORNO,
1226
                              help="Set the master_capable flag on the node")
1227

    
1228
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1229
                          type="bool", default=None, metavar=_YORNO,
1230
                          help="Set the vm_capable flag on the node")
1231

    
1232
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1233
                             type="bool", default=None, metavar=_YORNO,
1234
                             help="Set the allocatable flag on a volume")
1235

    
1236
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1237
                               help="Disable support for lvm based instances"
1238
                               " (cluster-wide)",
1239
                               action="store_false", default=True)
1240

    
1241
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1242
                            dest="enabled_hypervisors",
1243
                            help="Comma-separated list of hypervisors",
1244
                            type="string", default=None)
1245

    
1246
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1247
                                        dest="enabled_disk_templates",
1248
                                        help="Comma-separated list of "
1249
                                             "disk templates",
1250
                                        type="string", default=None)
1251

    
1252
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1253
                            type="keyval", default={},
1254
                            help="NIC parameters")
1255

    
1256
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1257
                         dest="candidate_pool_size", type="int",
1258
                         help="Set the candidate pool size")
1259

    
1260
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1261
                         help=("Enables LVM and specifies the volume group"
1262
                               " name (cluster-wide) for disk allocation"
1263
                               " [%s]" % constants.DEFAULT_VG),
1264
                         metavar="VG", default=None)
1265

    
1266
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1267
                          help="Destroy cluster", action="store_true")
1268

    
1269
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1270
                          help="Skip node agreement check (dangerous)",
1271
                          action="store_true", default=False)
1272

    
1273
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1274
                            help="Specify the mac prefix for the instance IP"
1275
                            " addresses, in the format XX:XX:XX",
1276
                            metavar="PREFIX",
1277
                            default=None)
1278

    
1279
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1280
                               help="Specify the node interface (cluster-wide)"
1281
                               " on which the master IP address will be added"
1282
                               " (cluster init default: %s)" %
1283
                               constants.DEFAULT_BRIDGE,
1284
                               metavar="NETDEV",
1285
                               default=None)
1286

    
1287
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1288
                                help="Specify the netmask of the master IP",
1289
                                metavar="NETMASK",
1290
                                default=None)
1291

    
1292
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1293
                                     dest="use_external_mip_script",
1294
                                     help="Specify whether to run a"
1295
                                     " user-provided script for the master"
1296
                                     " IP address turnup and"
1297
                                     " turndown operations",
1298
                                     type="bool", metavar=_YORNO, default=None)
1299

    
1300
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1301
                                help="Specify the default directory (cluster-"
1302
                                "wide) for storing the file-based disks [%s]" %
1303
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1304
                                metavar="DIR",
1305
                                default=None)
1306

    
1307
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1308
  "--shared-file-storage-dir",
1309
  dest="shared_file_storage_dir",
1310
  help="Specify the default directory (cluster-wide) for storing the"
1311
  " shared file-based disks [%s]" %
1312
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1313
  metavar="SHAREDDIR", default=None)
1314

    
1315
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1316
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1317
                                   action="store_false", default=True)
1318

    
1319
MODIFY_ETCHOSTS_OPT = \
1320
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
1321
            default=None, type="bool",
1322
            help="Defines whether the cluster should autonomously modify"
1323
            " and keep in sync the /etc/hosts file of the nodes")
1324

    
1325
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1326
                                    help="Don't initialize SSH keys",
1327
                                    action="store_false", default=True)
1328

    
1329
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1330
                             help="Enable parseable error messages",
1331
                             action="store_true", default=False)
1332

    
1333
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1334
                          help="Skip N+1 memory redundancy tests",
1335
                          action="store_true", default=False)
1336

    
1337
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1338
                             help="Type of reboot: soft/hard/full",
1339
                             default=constants.INSTANCE_REBOOT_HARD,
1340
                             metavar="<REBOOT>",
1341
                             choices=list(constants.REBOOT_TYPES))
1342

    
1343
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1344
                                    dest="ignore_secondaries",
1345
                                    default=False, action="store_true",
1346
                                    help="Ignore errors from secondaries")
1347

    
1348
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1349
                            action="store_false", default=True,
1350
                            help="Don't shutdown the instance (unsafe)")
1351

    
1352
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1353
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1354
                         help="Maximum time to wait")
1355

    
1356
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1357
                                  dest="shutdown_timeout", type="int",
1358
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1359
                                  help="Maximum time to wait for instance"
1360
                                  " shutdown")
1361

    
1362
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1363
                          default=None,
1364
                          help=("Number of seconds between repetions of the"
1365
                                " command"))
1366

    
1367
EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")

HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
                                     dest="hotplug_if_possible",
                                     action="store_true", default=False,
                                     help="Hotplug devices in case"
                                          " hotplug is supported")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

SUBMIT_OPTS = [
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  ]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]


class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error


class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  """

def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
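
# Illustrative note (not from the original module): when a command is listed
# in env_override, default arguments can be injected through an environment
# variable derived from the binary and command name, as computed above.  For
# example (assumed names, for illustration only):
#   binary = "gnt-instance", cmd = "list"  ->  args_env_name == "GNT_INSTANCE_LIST"
#   GNT_INSTANCE_LIST="-o name,status" gnt-instance list
# behaves as if "-o name,status" had been typed right after "list".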


def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s   %s" % (mlen, "", line)

  yield ""


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
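
# Worked example (illustrative, not part of the module): for an argument
# definition whose two entries have (min=1, max=1) and (min=0, max=None), the
# loop above computes min_count=1 and check_max=False (the last argument is
# unlimited), so the command accepts one or more arguments and an empty
# argument list is rejected with the "expects at least 1 argument(s)" error.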


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
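
# Illustrative examples (node names are made up; not part of the module):
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)
#   SplitNodeOption(None) -> (None, None)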


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
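
# Illustrative example (OS name and variants are made up):
#   CalculateOSNames("debootstrap", ["minimal", "default"])
#     -> ["debootstrap+minimal", "debootstrap+default"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]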


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
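
# Illustrative examples (field names are made up): a leading "+" extends the
# defaults instead of replacing them:
#   ParseFields(None, ["name", "status"])       -> ["name", "status"]
#   ParseFields("+oper_ram", ["name"])          -> ["name", "oper_ram"]
#   ParseFields("name,os", ["name", "status"])  -> ["name", "os"]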


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
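
# Illustrative call (not from the original module): a custom three-way prompt;
# the last entry doubles as the default returned when no tty is available:
#   choice = AskUser("Destroy instance data?",
#                    [("y", True, "Destroy the data"),
#                     ("n", False, "Keep the data"),
#                     ("c", "cancel", "Cancel the whole operation")])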


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
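
# Minimal usage sketch (illustrative; "op" stands for any opcode instance):
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)   # one result per opcode in the job
# With neither "feedback_fn" nor "reporter" given, PollJob falls back to
# StdioJobPollReportCb and prints the job's log messages to standard output.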


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  if hasattr(opts, "print_jobid") and opts.print_jobid:
    ToStdout("%d" % job_id)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOpCodeToDrainedQueue(op):
  """Forcefully insert a job in the queue, even if it is drained.

  """
  cl = GetClient()
  job_id = cl.SubmitJobToDrainedQueue([op])
  op_results = PollJob(job_id, cl=cl)
  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    if opts.print_jobid:
      ToStdout("%d" % job_id)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail.

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail
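
# Illustrative result (values are made up): running
#   gnt-instance stop --reason "planned maintenance" inst1
# would leave op.reason looking roughly like
#   [(constants.OPCODE_REASON_SRC_USER, "planned maintenance", <ns timestamp>),
#    ("%s:gnt-instance" % constants.OPCODE_REASON_SRC_CLIENT, "stop",
#     <ns timestamp>)]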


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)


def GetClient(query=False):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
  if override_socket:
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
      address = pathutils.MASTER_SOCKET
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
      address = pathutils.QUERY_SOCKET
    else:
      address = override_socket
  elif query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    address = None
  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
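
# Typical use (illustrative): convert an exception into an exit code plus a
# printable message, much like GenericMain below does:
#   retcode, msg = FormatError(err)
#   ToStderr(msg)
#   sys.exit(retcode)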


def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
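
# Illustrative sketch (names are placeholders, not real commands) of the
# "commands" dictionary expected by GenericMain: each value is a tuple of
# (function, argument definition, options, usage suffix, description),
# mirroring the unpacking done in _ParseArgs above:
#   commands = {
#     "ping": (PingCommandFn, [], [FORCE_OPT], "", "Hypothetical command"),
#     }
#   sys.exit(GenericMain(commands))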


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
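
# Illustrative example (addresses are made up): the option value is a list of
# (index, settings) pairs, e.g. from "--net 0:ip=192.0.2.10 --net 1:link=br0":
#   ParseNicOption([("0", {"ip": "192.0.2.10"}), ("1", {"link": "br0"})])
#     -> [{"ip": "192.0.2.10"}, {"link": "br0"}]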


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if constants.IDISK_SPINDLES in ddict:
          raise errors.OpPrereqError("spindles is not a valid option when"
                                     " adopting a disk", errors.ECODE_INVAL)
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
2861
  """Calls a function while all cluster daemons are stopped.
2862

2863
  @type feedback_fn: callable
2864
  @param feedback_fn: Feedback function
2865
  @type fn: callable
2866
  @param fn: Function to be called when daemons are stopped
2867

2868
  """
2869
  feedback_fn("Gathering cluster information")
2870

    
2871
  # This ensures we're running on the master daemon
2872
  cl = GetClient()
2873

    
2874
  (cluster_name, master_node) = \
2875
    cl.QueryConfigValues(["cluster_name", "master_node"])
2876

    
2877
  online_nodes = GetOnlineNodes([], cl=cl)
2878

    
2879
  # Don't keep a reference to the client. The master daemon will go away.
2880
  del cl
2881

    
2882
  assert master_node in online_nodes
2883

    
2884
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2885
                                       online_nodes).Call(fn, *args)
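

# Illustrative sketch only (not part of the original module): shows the
# calling convention of RunWhileClusterStopped. The callback receives the
# helper instance as its first argument, followed by any extra arguments;
# the callback and message used here are made up.
def _ExampleRunWhileClusterStopped():
  """Runs a trivial callback while all cluster daemons are stopped.

  """
  def _Callback(ctx, text):
    # "ctx" is the _RunWhileClusterStoppedHelper instance
    ctx.feedback_fn("All daemons are stopped now: %s" % text)

  RunWhileClusterStopped(ToStdout, _Callback, "hello")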
2886

    
2887

    
2888
def GenerateTable(headers, fields, separator, data,
2889
                  numfields=None, unitfields=None,
2890
                  units=None):
2891
  """Prints a table with headers and different fields.
2892

2893
  @type headers: dict
2894
  @param headers: dictionary mapping field names to headers for
2895
      the table
2896
  @type fields: list
2897
  @param fields: the field names corresponding to each row in
2898
      the data field
2899
  @param separator: the separator to be used; if this is None,
2900
      the default 'smart' algorithm is used which computes optimal
2901
      field width, otherwise just the separator is used between
2902
      each field
2903
  @type data: list
2904
  @param data: a list of lists, each sublist being one row to be output
2905
  @type numfields: list
2906
  @param numfields: a list with the fields that hold numeric
2907
      values and thus should be right-aligned
2908
  @type unitfields: list
2909
  @param unitfields: a list with the fields that hold numeric
2910
      values that should be formatted with the units field
2911
  @type units: string or None
2912
  @param units: the units we should use for formatting, or None for
2913
      automatic choice (human-readable for non-separator usage, otherwise
2914
      megabytes); this is a one-letter string
2915

2916
  """
2917
  if units is None:
2918
    if separator:
2919
      units = "m"
2920
    else:
2921
      units = "h"
2922

    
2923
  if numfields is None:
2924
    numfields = []
2925
  if unitfields is None:
2926
    unitfields = []
2927

    
2928
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2929
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2930

    
2931
  format_fields = []
2932
  for field in fields:
2933
    if headers and field not in headers:
2934
      # TODO: handle better unknown fields (either revert to old
2935
      # style of raising exception, or deal more intelligently with
2936
      # variable fields)
2937
      headers[field] = field
2938
    if separator is not None:
2939
      format_fields.append("%s")
2940
    elif numfields.Matches(field):
2941
      format_fields.append("%*s")
2942
    else:
2943
      format_fields.append("%-*s")
2944

    
2945
  if separator is None:
2946
    mlens = [0 for name in fields]
2947
    format_str = " ".join(format_fields)
2948
  else:
2949
    format_str = separator.replace("%", "%%").join(format_fields)
2950

    
2951
  for row in data:
2952
    if row is None:
2953
      continue
2954
    for idx, val in enumerate(row):
2955
      if unitfields.Matches(fields[idx]):
2956
        try:
2957
          val = int(val)
2958
        except (TypeError, ValueError):
2959
          pass
2960
        else:
2961
          val = row[idx] = utils.FormatUnit(val, units)
2962
      val = row[idx] = str(val)
2963
      if separator is None:
2964
        mlens[idx] = max(mlens[idx], len(val))
2965

    
2966
  result = []
2967
  if headers:
2968
    args = []
2969
    for idx, name in enumerate(fields):
2970
      hdr = headers[name]
2971
      if separator is None:
2972
        mlens[idx] = max(mlens[idx], len(hdr))
2973
        args.append(mlens[idx])
2974
      args.append(hdr)
2975
    result.append(format_str % tuple(args))
2976

    
2977
  if separator is None:
2978
    assert len(mlens) == len(fields)
2979

    
2980
    if fields and not numfields.Matches(fields[-1]):
2981
      mlens[-1] = 0
2982

    
2983
  for line in data:
2984
    args = []
2985
    if line is None:
2986
      line = ["-" for _ in fields]
2987
    for idx in range(len(fields)):
2988
      if separator is None:
2989
        args.append(mlens[idx])
2990
      args.append(line[idx])
2991
    result.append(format_str % tuple(args))
2992

    
2993
  return result
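

# Illustrative sketch only (not part of the original module): renders a small
# two-column listing with GenerateTable; the node names and sizes are made up.
def _ExampleGenerateTable():
  """Prints a table with a right-aligned, human-readable size column.

  """
  headers = {"name": "Node", "dfree": "DFree"}
  data = [["node1.example.com", 102400], ["node2.example.com", 204800]]
  # separator=None selects the "smart" width algorithm and human-readable units
  for line in GenerateTable(headers, ["name", "dfree"], None, data,
                            numfields=["dfree"], unitfields=["dfree"]):
    ToStdout(line)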
2994

    
2995

    
2996
def _FormatBool(value):
2997
  """Formats a boolean value as a string.
2998

2999
  """
3000
  if value:
3001
    return "Y"
3002
  return "N"
3003

    
3004

    
3005
#: Default formatting for query results; (callback, align right)
3006
_DEFAULT_FORMAT_QUERY = {
3007
  constants.QFT_TEXT: (str, False),
3008
  constants.QFT_BOOL: (_FormatBool, False),
3009
  constants.QFT_NUMBER: (str, True),
3010
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
3011
  constants.QFT_OTHER: (str, False),
3012
  constants.QFT_UNKNOWN: (str, False),
3013
  }
3014

    
3015

    
3016
def _GetColumnFormatter(fdef, override, unit):
3017
  """Returns formatting function for a field.
3018

3019
  @type fdef: L{objects.QueryFieldDefinition}
3020
  @type override: dict
3021
  @param override: Dictionary for overriding field formatting functions,
3022
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3023
  @type unit: string
3024
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3025
  @rtype: tuple; (callable, bool)
3026
  @return: Returns the function to format a value (takes one parameter) and a
3027
    boolean for aligning the value on the right-hand side
3028

3029
  """
3030
  fmt = override.get(fdef.name, None)
3031
  if fmt is not None:
3032
    return fmt
3033

    
3034
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3035

    
3036
  if fdef.kind == constants.QFT_UNIT:
3037
    # Can't keep this information in the static dictionary
3038
    return (lambda value: utils.FormatUnit(value, unit), True)
3039

    
3040
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3041
  if fmt is not None:
3042
    return fmt
3043

    
3044
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3045

    
3046

    
3047
class _QueryColumnFormatter:
3048
  """Callable class for formatting fields of a query.
3049

3050
  """
3051
  def __init__(self, fn, status_fn, verbose):
3052
    """Initializes this class.
3053

3054
    @type fn: callable
3055
    @param fn: Formatting function
3056
    @type status_fn: callable
3057
    @param status_fn: Function to report fields' status
3058
    @type verbose: boolean
3059
    @param verbose: whether to use verbose field descriptions or not
3060

3061
    """
3062
    self._fn = fn
3063
    self._status_fn = status_fn
3064
    self._verbose = verbose
3065

    
3066
  def __call__(self, data):
3067
    """Returns a field's string representation.
3068

3069
    """
3070
    (status, value) = data
3071

    
3072
    # Report status
3073
    self._status_fn(status)
3074

    
3075
    if status == constants.RS_NORMAL:
3076
      return self._fn(value)
3077

    
3078
    assert value is None, \
3079
           "Found value %r for abnormal status %s" % (value, status)
3080

    
3081
    return FormatResultError(status, self._verbose)
3082

    
3083

    
3084
def FormatResultError(status, verbose):
3085
  """Formats result status other than L{constants.RS_NORMAL}.
3086

3087
  @param status: The result status
3088
  @type verbose: boolean
3089
  @param verbose: Whether to return the verbose text
3090
  @return: Text of result status
3091

3092
  """
3093
  assert status != constants.RS_NORMAL, \
3094
         "FormatResultError called with status equal to constants.RS_NORMAL"
3095
  try:
3096
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3097
  except KeyError:
3098
    raise NotImplementedError("Unknown status %s" % status)
3099
  else:
3100
    if verbose:
3101
      return verbose_text
3102
    return normal_text
3103

    
3104

    
3105
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3106
                      header=False, verbose=False):
3107
  """Formats data in L{objects.QueryResponse}.
3108

3109
  @type result: L{objects.QueryResponse}
3110
  @param result: result of query operation
3111
  @type unit: string
3112
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3113
    see L{utils.text.FormatUnit}
3114
  @type format_override: dict
3115
  @param format_override: Dictionary for overriding field formatting functions,
3116
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3117
  @type separator: string or None
3118
  @param separator: String used to separate fields
3119
  @type header: bool
3120
  @param header: Whether to output header row
3121
  @type verbose: boolean
3122
  @param verbose: whether to use verbose field descriptions or not
3123

3124
  """
3125
  if unit is None:
3126
    if separator:
3127
      unit = "m"
3128
    else:
3129
      unit = "h"
3130

    
3131
  if format_override is None:
3132
    format_override = {}
3133

    
3134
  stats = dict.fromkeys(constants.RS_ALL, 0)
3135

    
3136
  def _RecordStatus(status):
3137
    if status in stats:
3138
      stats[status] += 1
3139

    
3140
  columns = []
3141
  for fdef in result.fields:
3142
    assert fdef.title and fdef.name
3143
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3144
    columns.append(TableColumn(fdef.title,
3145
                               _QueryColumnFormatter(fn, _RecordStatus,
3146
                                                     verbose),
3147
                               align_right))
3148

    
3149
  table = FormatTable(result.data, columns, header, separator)
3150

    
3151
  # Collect statistics
3152
  assert len(stats) == len(constants.RS_ALL)
3153
  assert compat.all(count >= 0 for count in stats.values())
3154

    
3155
  # Determine overall status. If there was no data, unknown fields must be
3156
  # detected via the field definitions.
3157
  if (stats[constants.RS_UNKNOWN] or
3158
      (not result.data and _GetUnknownFields(result.fields))):
3159
    status = QR_UNKNOWN
3160
  elif compat.any(count > 0 for key, count in stats.items()
3161
                  if key != constants.RS_NORMAL):
3162
    status = QR_INCOMPLETE
3163
  else:
3164
    status = QR_NORMAL
3165

    
3166
  return (status, table)
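

# Illustrative sketch only (not part of the original module): formats the
# result of a node query; the field list and the use of GetClient() here are
# assumptions for the example.
def _ExampleFormatQueryResult():
  """Queries node names and free disk space and prints the formatted table.

  """
  cl = GetClient()
  response = cl.Query(constants.QR_NODE, ["name", "dfree"], None)
  (status, lines) = FormatQueryResult(response, unit="h", header=True)
  for line in lines:
    ToStdout(line)
  return status == QR_NORMAL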
3167

    
3168

    
3169
def _GetUnknownFields(fdefs):
3170
  """Returns list of unknown fields included in C{fdefs}.
3171

3172
  @type fdefs: list of L{objects.QueryFieldDefinition}
3173

3174
  """
3175
  return [fdef for fdef in fdefs
3176
          if fdef.kind == constants.QFT_UNKNOWN]
3177

    
3178

    
3179
def _WarnUnknownFields(fdefs):
3180
  """Prints a warning to stderr if a query included unknown fields.
3181

3182
  @type fdefs: list of L{objects.QueryFieldDefinition}
3183

3184
  """
3185
  unknown = _GetUnknownFields(fdefs)
3186
  if unknown:
3187
    ToStderr("Warning: Queried for unknown fields %s",
3188
             utils.CommaJoin(fdef.name for fdef in unknown))
3189
    return True
3190

    
3191
  return False
3192

    
3193

    
3194
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3195
                format_override=None, verbose=False, force_filter=False,
3196
                namefield=None, qfilter=None, isnumeric=False):
3197
  """Generic implementation for listing all items of a resource.
3198

3199
  @param resource: One of L{constants.QR_VIA_LUXI}
3200
  @type fields: list of strings
3201
  @param fields: List of fields to query for
3202
  @type names: list of strings
3203
  @param names: Names of items to query for
3204
  @type unit: string or None
3205
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3206
    None for automatic choice (human-readable for non-separator usage,
3207
    otherwise megabytes); this is a one-letter string
3208
  @type separator: string or None
3209
  @param separator: String used to separate fields
3210
  @type header: bool
3211
  @param header: Whether to show header row
3212
  @type force_filter: bool
3213
  @param force_filter: Whether to always treat names as filter
3214
  @type format_override: dict
3215
  @param format_override: Dictionary for overriding field formatting functions,
3216
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3217
  @type verbose: boolean
3218
  @param verbose: whether to use verbose field descriptions or not
3219
  @type namefield: string
3220
  @param namefield: Name of field to use for simple filters (see
3221
    L{qlang.MakeFilter} for details)
3222
  @type qfilter: list or None
3223
  @param qfilter: Query filter (in addition to names)
3224
  @type isnumeric: bool
3225
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3226
    any simple filters built by namefield should use integer values to
3227
    reflect that
3228

3229
  """
3230
  if not names:
3231
    names = None
3232

    
3233
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3234
                                isnumeric=isnumeric)
3235

    
3236
  if qfilter is None:
3237
    qfilter = namefilter
3238
  elif namefilter is not None:
3239
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3240

    
3241
  if cl is None:
3242
    cl = GetClient()
3243

    
3244
  response = cl.Query(resource, fields, qfilter)
3245

    
3246
  found_unknown = _WarnUnknownFields(response.fields)
3247

    
3248
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3249
                                     header=header,
3250
                                     format_override=format_override,
3251
                                     verbose=verbose)
3252

    
3253
  for line in data:
3254
    ToStdout(line)
3255

    
3256
  assert ((found_unknown and status == QR_UNKNOWN) or
3257
          (not found_unknown and status != QR_UNKNOWN))
3258

    
3259
  if status == QR_UNKNOWN:
3260
    return constants.EXIT_UNKNOWN_FIELD
3261

    
3262
  # TODO: Should the list command fail if not all data could be collected?
3263
  return constants.EXIT_SUCCESS
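

# Illustrative sketch only (not part of the original module): how a typical
# "gnt-* list" command function might call GenericList; the option attributes
# used here (output, units, separator, no_headers, verbose) are assumptions.
def _ExampleListNodes(opts, args):
  """Lists nodes, filtering by the names given on the command line.

  """
  fields = (opts.output or "name,dtotal,dfree").split(",")
  return GenericList(constants.QR_NODE, fields, args, opts.units,
                     opts.separator, not opts.no_headers,
                     verbose=opts.verbose, namefield="name")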
3264

    
3265

    
3266
def _FieldDescValues(fdef):
3267
  """Helper function for L{GenericListFields} to get query field description.
3268

3269
  @type fdef: L{objects.QueryFieldDefinition}
3270
  @rtype: list
3271

3272
  """
3273
  return [
3274
    fdef.name,
3275
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3276
    fdef.title,
3277
    fdef.doc,
3278
    ]
3279

    
3280

    
3281
def GenericListFields(resource, fields, separator, header, cl=None):
3282
  """Generic implementation for listing fields for a resource.
3283

3284
  @param resource: One of L{constants.QR_VIA_LUXI}
3285
  @type fields: list of strings
3286
  @param fields: List of fields to query for
3287
  @type separator: string or None
3288
  @param separator: String used to separate fields
3289
  @type header: bool
3290
  @param header: Whether to show header row
3291

3292
  """
3293
  if cl is None:
3294
    cl = GetClient()
3295

    
3296
  if not fields:
3297
    fields = None
3298

    
3299
  response = cl.QueryFields(resource, fields)
3300

    
3301
  found_unknown = _WarnUnknownFields(response.fields)
3302

    
3303
  columns = [
3304
    TableColumn("Name", str, False),
3305
    TableColumn("Type", str, False),
3306
    TableColumn("Title", str, False),
3307
    TableColumn("Description", str, False),
3308
    ]
3309

    
3310
  rows = map(_FieldDescValues, response.fields)
3311

    
3312
  for line in FormatTable(rows, columns, header, separator):
3313
    ToStdout(line)
3314

    
3315
  if found_unknown:
3316
    return constants.EXIT_UNKNOWN_FIELD
3317

    
3318
  return constants.EXIT_SUCCESS
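

# Illustrative sketch only (not part of the original module): the matching
# "list-fields" command is usually a thin wrapper like this; the option
# attributes are assumptions.
def _ExampleListNodeFields(opts, args):
  """Lists all available or only the requested node query fields.

  """
  return GenericListFields(constants.QR_NODE, args, opts.separator,
                           not opts.no_headers)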
3319

    
3320

    
3321
class TableColumn:
3322
  """Describes a column for L{FormatTable}.
3323

3324
  """
3325
  def __init__(self, title, fn, align_right):
3326
    """Initializes this class.
3327

3328
    @type title: string
3329
    @param title: Column title
3330
    @type fn: callable
3331
    @param fn: Formatting function
3332
    @type align_right: bool
3333
    @param align_right: Whether to align values on the right-hand side
3334

3335
    """
3336
    self.title = title
3337
    self.format = fn
3338
    self.align_right = align_right
3339

    
3340

    
3341
def _GetColFormatString(width, align_right):
3342
  """Returns the format string for a field.
3343

3344
  """
3345
  if align_right:
3346
    sign = ""
3347
  else:
3348
    sign = "-"
3349

    
3350
  return "%%%s%ss" % (sign, width)
3351

    
3352

    
3353
def FormatTable(rows, columns, header, separator):
3354
  """Formats data as a table.
3355

3356
  @type rows: list of lists
3357
  @param rows: Row data, one list per row
3358
  @type columns: list of L{TableColumn}
3359
  @param columns: Column descriptions
3360
  @type header: bool
3361
  @param header: Whether to show header row
3362
  @type separator: string or None
3363
  @param separator: String used to separate columns
3364

3365
  """
3366
  if header:
3367
    data = [[col.title for col in columns]]
3368
    colwidth = [len(col.title) for col in columns]
3369
  else:
3370
    data = []
3371
    colwidth = [0 for _ in columns]
3372

    
3373
  # Format row data
3374
  for row in rows:
3375
    assert len(row) == len(columns)
3376

    
3377
    formatted = [col.format(value) for value, col in zip(row, columns)]
3378

    
3379
    if separator is None:
3380
      # Update column widths
3381
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3382
        # Modifying a list's items while iterating is fine
3383
        colwidth[idx] = max(oldwidth, len(value))
3384

    
3385
    data.append(formatted)
3386

    
3387
  if separator is not None:
3388
    # Return early if a separator is used
3389
    return [separator.join(row) for row in data]
3390

    
3391
  if columns and not columns[-1].align_right:
3392
    # Avoid unnecessary spaces at end of line
3393
    colwidth[-1] = 0
3394

    
3395
  # Build format string
3396
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3397
                  for col, width in zip(columns, colwidth)])
3398

    
3399
  return [fmt % tuple(row) for row in data]
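

# Illustrative sketch only (not part of the original module): builds a tiny
# table with literal values; the column titles and rows are made up.
def _ExampleFormatTable():
  """Prints a two-column table with a right-aligned, human-readable size.

  """
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Size", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["disk/0", 1024], ["disk/1", 20480]]
  for line in FormatTable(rows, columns, True, None):
    ToStdout(line)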
3400

    
3401

    
3402
def FormatTimestamp(ts):
3403
  """Formats a given timestamp.
3404

3405
  @type ts: timestamp
3406
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3407

3408
  @rtype: string
3409
  @return: a string with the formatted timestamp
3410

3411
  """
3412
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3413
    return "?"
3414

    
3415
  (sec, usecs) = ts
3416
  return utils.FormatTime(sec, usecs=usecs)
3417

    
3418

    
3419
def ParseTimespec(value):
3420
  """Parse a time specification.
3421

3422
  The following suffixes will be recognized:
3423

3424
    - s: seconds
3425
    - m: minutes
3426
    - h: hours
3427
    - d: days
3428
    - w: weeks
3429

3430
  Without any suffix, the value will be taken to be in seconds.
3431

3432
  """
3433
  value = str(value)
3434
  if not value:
3435
    raise errors.OpPrereqError("Empty time specification passed",
3436
                               errors.ECODE_INVAL)
3437
  suffix_map = {
3438
    "s": 1,
3439
    "m": 60,
3440
    "h": 3600,
3441
    "d": 86400,
3442
    "w": 604800,
3443
    }
3444
  if value[-1] not in suffix_map:
3445
    try:
3446
      value = int(value)
3447
    except (TypeError, ValueError):
3448
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3449
                                 errors.ECODE_INVAL)
3450
  else:
3451
    multiplier = suffix_map[value[-1]]
3452
    value = value[:-1]
3453
    if not value: # no data left after stripping the suffix
3454
      raise errors.OpPrereqError("Invalid time specification (only"
3455
                                 " suffix passed)", errors.ECODE_INVAL)
3456
    try:
3457
      value = int(value) * multiplier
3458
    except (TypeError, ValueError):
3459
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3460
                                 errors.ECODE_INVAL)
3461
  return value
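
# For example, ParseTimespec("30") and ParseTimespec(30) both return 30,
# ParseTimespec("2h") returns 7200 and ParseTimespec("1w") returns 604800,
# while ParseTimespec("h") and ParseTimespec("2x") raise OpPrereqError.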
3462

    
3463

    
3464
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3465
                   filter_master=False, nodegroup=None):
3466
  """Returns the names of online nodes.
3467

3468
  This function will also log a warning on stderr with the names of
3469
  the offline nodes that are skipped.
3470

3471
  @param nodes: if not empty, use only this subset of nodes (minus the
3472
      offline ones)
3473
  @param cl: if not None, luxi client to use
3474
  @type nowarn: boolean
3475
  @param nowarn: by default, this function will output a note with the
3476
      offline nodes that are skipped; if this parameter is True the
3477
      note is not displayed
3478
  @type secondary_ips: boolean
3479
  @param secondary_ips: if True, return the secondary IPs instead of the
3480
      names, useful for doing network traffic over the replication interface
3481
      (if any)
3482
  @type filter_master: boolean
3483
  @param filter_master: if True, do not return the master node in the list
3484
      (useful in coordination with secondary_ips where we cannot check our
3485
      node name against the list)
3486
  @type nodegroup: string
3487
  @param nodegroup: If set, only return nodes in this node group
3488

3489
  """
3490
  if cl is None:
3491
    cl = GetClient()
3492

    
3493
  qfilter = []
3494

    
3495
  if nodes:
3496
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3497

    
3498
  if nodegroup is not None:
3499
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3500
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3501

    
3502
  if filter_master:
3503
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3504

    
3505
  if qfilter:
3506
    if len(qfilter) > 1:
3507
      final_filter = [qlang.OP_AND] + qfilter
3508
    else:
3509
      assert len(qfilter) == 1
3510
      final_filter = qfilter[0]
3511
  else:
3512
    final_filter = None
3513

    
3514
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3515

    
3516
  def _IsOffline(row):
3517
    (_, (_, offline), _) = row
3518
    return offline
3519

    
3520
  def _GetName(row):
3521
    ((_, name), _, _) = row
3522
    return name
3523

    
3524
  def _GetSip(row):
3525
    (_, _, (_, sip)) = row
3526
    return sip
3527

    
3528
  (offline, online) = compat.partition(result.data, _IsOffline)
3529

    
3530
  if offline and not nowarn:
3531
    ToStderr("Note: skipping offline node(s): %s" %
3532
             utils.CommaJoin(map(_GetName, offline)))
3533

    
3534
  if secondary_ips:
3535
    fn = _GetSip
3536
  else:
3537
    fn = _GetName
3538

    
3539
  return map(fn, online)
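

# Illustrative sketch only (not part of the original module): collects the
# secondary IPs of all online nodes except the master, e.g. for copying data
# over the replication network.
def _ExampleGetReplicationIPs():
  """Returns the secondary IPs of all online non-master nodes.

  """
  return GetOnlineNodes([], secondary_ips=True, filter_master=True)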
3540

    
3541

    
3542
def _ToStream(stream, txt, *args):
3543
  """Write a message to a stream, bypassing the logging system
3544

3545
  @type stream: file object
3546
  @param stream: the file to which we should write
3547
  @type txt: str
3548
  @param txt: the message
3549

3550
  """
3551
  try:
3552
    if args:
3553
      args = tuple(args)
3554
      stream.write(txt % args)
3555
    else:
3556
      stream.write(txt)
3557
    stream.write("\n")
3558
    stream.flush()
3559
  except IOError, err:
3560
    if err.errno == errno.EPIPE:
3561
      # our terminal went away, we'll exit
3562
      sys.exit(constants.EXIT_FAILURE)
3563
    else:
3564
      raise
3565

    
3566

    
3567
def ToStdout(txt, *args):
3568
  """Write a message to stdout only, bypassing the logging system
3569

3570
  This is just a wrapper over _ToStream.
3571

3572
  @type txt: str
3573
  @param txt: the message
3574

3575
  """
3576
  _ToStream(sys.stdout, txt, *args)
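
# The optional arguments are applied with the %-operator, so callers can
# write, for example, ToStdout("Submitted %d jobs for group %s", count, name).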
3577

    
3578

    
3579
def ToStderr(txt, *args):
3580
  """Write a message to stderr only, bypassing the logging system
3581

3582
  This is just a wrapper over _ToStream.
3583

3584
  @type txt: str
3585
  @param txt: the message
3586

3587
  """
3588
  _ToStream(sys.stderr, txt, *args)
3589

    
3590

    
3591
class JobExecutor(object):
3592
  """Class which manages the submission and execution of multiple jobs.
3593

3594
  Note that instances of this class should not be reused between
3595
  GetResults() calls.
3596

3597
  """
3598
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3599
    self.queue = []
3600
    if cl is None:
3601
      cl = GetClient()
3602
    self.cl = cl
3603
    self.verbose = verbose
3604
    self.jobs = []
3605
    self.opts = opts
3606
    self.feedback_fn = feedback_fn
3607
    self._counter = itertools.count()
3608

    
3609
  @staticmethod
3610
  def _IfName(name, fmt):
3611
    """Helper function for formatting name.
3612

3613
    """
3614
    if name:
3615
      return fmt % name
3616

    
3617
    return ""
3618

    
3619
  def QueueJob(self, name, *ops):
3620
    """Record a job for later submit.
3621

3622
    @type name: string
3623
    @param name: a description of the job, will be used when reporting results
3624

3625
    """
3626
    SetGenericOpcodeOpts(ops, self.opts)
3627
    self.queue.append((self._counter.next(), name, ops))
3628

    
3629
  def AddJobId(self, name, status, job_id):
3630
    """Adds a job ID to the internal queue.
3631

3632
    """
3633
    self.jobs.append((self._counter.next(), status, job_id, name))
3634

    
3635
  def SubmitPending(self, each=False):
3636
    """Submit all pending jobs.
3637

3638
    """
3639
    if each:
3640
      results = []
3641
      for (_, _, ops) in self.queue:
3642
        # Unlike SubmitManyJobs, SubmitJob does not return a success status,
3643
        # but it raises an exception on failure, so we'll notice that anyway.
3644
        results.append([True, self.cl.SubmitJob(ops)[0]])
3645
    else:
3646
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3647
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3648
      self.jobs.append((idx, status, data, name))
3649

    
3650
  def _ChooseJob(self):
3651
    """Choose a non-waiting/queued job to poll next.
3652

3653
    """
3654
    assert self.jobs, "_ChooseJob called with empty job list"
3655

    
3656
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3657
                               ["status"])
3658
    assert result
3659

    
3660
    for job_data, status in zip(self.jobs, result):
3661
      if (isinstance(status, list) and status and
3662
          status[0] in (constants.JOB_STATUS_QUEUED,
3663
                        constants.JOB_STATUS_WAITING,
3664
                        constants.JOB_STATUS_CANCELING)):
3665
        # job is still present and waiting
3666
        continue
3667
      # good candidate found (either running job or lost job)
3668
      self.jobs.remove(job_data)
3669
      return job_data
3670

    
3671
    # no job found
3672
    return self.jobs.pop(0)
3673

    
3674
  def GetResults(self):
3675
    """Wait for and return the results of all jobs.
3676

3677
    @rtype: list
3678
    @return: list of tuples (success, job results), in the same order
3679
        as the submitted jobs; if a job has failed, instead of the result
3680
        there will be the error message
3681

3682
    """
3683
    if not self.jobs:
3684
      self.SubmitPending()
3685
    results = []
3686
    if self.verbose:
3687
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3688
      if ok_jobs:
3689
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3690

    
3691
    # first, remove any non-submitted jobs
3692
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3693
    for idx, _, jid, name in failures:
3694
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3695
      results.append((idx, False, jid))
3696

    
3697
    while self.jobs:
3698
      (idx, _, jid, name) = self._ChooseJob()
3699
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3700
      try:
3701
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3702
        success = True
3703
      except errors.JobLost, err:
3704
        _, job_result = FormatError(err)
3705
        ToStderr("Job %s%s has been archived, cannot check its result",
3706
                 jid, self._IfName(name, " for %s"))
3707
        success = False
3708
      except (errors.GenericError, luxi.ProtocolError), err:
3709
        _, job_result = FormatError(err)
3710
        success = False
3711
        # the error message will always be shown, verbose or not
3712
        ToStderr("Job %s%s has failed: %s",
3713
                 jid, self._IfName(name, " for %s"), job_result)
3714

    
3715
      results.append((idx, success, job_result))
3716

    
3717
    # sort based on the index, then drop it
3718
    results.sort()
3719
    results = [i[1:] for i in results]
3720

    
3721
    return results
3722

    
3723
  def WaitOrShow(self, wait):
3724
    """Wait for job results or only print the job IDs.
3725

3726
    @type wait: boolean
3727
    @param wait: whether to wait or not
3728

3729
    """
3730
    if wait:
3731
      return self.GetResults()
3732
    else:
3733
      if not self.jobs:
3734
        self.SubmitPending()
3735
      for _, status, result, name in self.jobs:
3736
        if status:
3737
          ToStdout("%s: %s", result, name)
3738
        else:
3739
          ToStderr("Failure for %s: %s", name, result)
3740
      return [row[1:3] for row in self.jobs]
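

# Illustrative sketch only (not part of the original module): submits one
# startup job per instance and waits for all of them; the instance names and
# the use of OpInstanceStartup are assumptions for the example.
def _ExampleStartInstances(opts, instance_names):
  """Starts the given instances through a JobExecutor.

  """
  jex = JobExecutor(opts=opts)
  for name in instance_names:
    jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
  results = jex.GetResults()
  return compat.all(success for (success, _) in results)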
3741

    
3742

    
3743
def FormatParamsDictInfo(param_dict, actual):
3744
  """Formats a parameter dictionary.
3745

3746
  @type param_dict: dict
3747
  @param param_dict: the own parameters
3748
  @type actual: dict
3749
  @param actual: the current parameter set (including defaults)
3750
  @rtype: dict
3751
  @return: dictionary where the value of each parameter is either a fully
3752
      formatted string or a dictionary containing formatted strings
3753

3754
  """
3755
  ret = {}
3756
  for (key, data) in actual.items():
3757
    if isinstance(data, dict) and data:
3758
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
3759
    else:
3760
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
3761
  return ret
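
# For example, FormatParamsDictInfo({"vcpus": 2}, {"vcpus": 4, "memory": 128})
# returns {"vcpus": "2", "memory": "default (128)"}.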
3762

    
3763

    
3764
def _FormatListInfoDefault(data, def_data):
3765
  if data is not None:
3766
    ret = utils.CommaJoin(data)
3767
  else:
3768
    ret = "default (%s)" % utils.CommaJoin(def_data)
3769
  return ret
3770

    
3771

    
3772
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3773
  """Formats an instance policy.
3774

3775
  @type custom_ipolicy: dict
3776
  @param custom_ipolicy: own policy
3777
  @type eff_ipolicy: dict
3778
  @param eff_ipolicy: effective policy (including defaults); ignored for
3779
      cluster
3780
  @type iscluster: bool
3781
  @param iscluster: the policy is at cluster level
3782
  @rtype: list of pairs
3783
  @return: formatted data, suitable for L{PrintGenericInfo}
3784

3785
  """
3786
  if iscluster:
3787
    eff_ipolicy = custom_ipolicy
3788

    
3789
  minmax_out = []
3790
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3791
  if custom_minmax:
3792
    for (k, minmax) in enumerate(custom_minmax):
3793
      minmax_out.append([
3794
        ("%s/%s" % (key, k),
3795
         FormatParamsDictInfo(minmax[key], minmax[key]))
3796
        for key in constants.ISPECS_MINMAX_KEYS
3797
        ])
3798
  else:
3799
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3800
      minmax_out.append([
3801
        ("%s/%s" % (key, k),
3802
         FormatParamsDictInfo({}, minmax[key]))
3803
        for key in constants.ISPECS_MINMAX_KEYS
3804
        ])
3805
  ret = [("bounds specs", minmax_out)]
3806

    
3807
  if iscluster:
3808
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
3809
    ret.append(
3810
      (constants.ISPECS_STD,
3811
       FormatParamsDictInfo(stdspecs, stdspecs))
3812
      )
3813

    
3814
  ret.append(
3815
    ("allowed disk templates",
3816
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3817
                            eff_ipolicy[constants.IPOLICY_DTS]))
3818
    )
3819
  ret.extend([
3820
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3821
    for key in constants.IPOLICY_PARAMETERS
3822
    ])
3823
  return ret
3824

    
3825

    
3826
def _PrintSpecsParameters(buf, specs):
3827
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3828
  buf.write(",".join(values))
3829

    
3830

    
3831
def PrintIPolicyCommand(buf, ipolicy, isgroup):
3832
  """Print the command option used to generate the given instance policy.
3833

3834
  Currently only the parts dealing with specs are supported.
3835

3836
  @type buf: StringIO
3837
  @param buf: stream to write into
3838
  @type ipolicy: dict
3839
  @param ipolicy: instance policy
3840
  @type isgroup: bool
3841
  @param isgroup: whether the policy is at group level
3842

3843
  """
3844
  if not isgroup:
3845
    stdspecs = ipolicy.get("std")
3846
    if stdspecs:
3847
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3848
      _PrintSpecsParameters(buf, stdspecs)
3849
  minmaxes = ipolicy.get("minmax", [])
3850
  first = True
3851
  for minmax in minmaxes:
3852
    minspecs = minmax.get("min")
3853
    maxspecs = minmax.get("max")
3854
    if minspecs and maxspecs:
3855
      if first:
3856
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3857
        first = False
3858
      else:
3859
        buf.write("//")
3860
      buf.write("min:")
3861
      _PrintSpecsParameters(buf, minspecs)
3862
      buf.write("/max:")
3863
      _PrintSpecsParameters(buf, maxspecs)
3864

    
3865

    
3866
def ConfirmOperation(names, list_type, text, extra=""):
3867
  """Ask the user to confirm an operation on a list of list_type.
3868

3869
  This function is used to request confirmation for doing an operation
3870
  on a given list of list_type.
3871

3872
  @type names: list
3873
  @param names: the list of names that we display when
3874
      we ask for confirmation
3875
  @type list_type: str
3876
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3877
  @type text: str
3878
  @param text: the operation that the user should confirm
3879
  @rtype: boolean
3880
  @return: True or False depending on user's confirmation.
3881

3882
  """
3883
  count = len(names)
3884
  msg = ("The %s will operate on %d %s.\n%s"
3885
         "Do you want to continue?" % (text, count, list_type, extra))
3886
  affected = (("\nAffected %s:\n" % list_type) +
3887
              "\n".join(["  %s" % name for name in names]))
3888

    
3889
  choices = [("y", True, "Yes, execute the %s" % text),
3890
             ("n", False, "No, abort the %s" % text)]
3891

    
3892
  if count > 20:
3893
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3894
    question = msg
3895
  else:
3896
    question = msg + affected
3897

    
3898
  choice = AskUser(question, choices)
3899
  if choice == "v":
3900
    choices.pop(1)
3901
    choice = AskUser(msg + affected, choices)
3902
  return choice
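

# Illustrative sketch only (not part of the original module): a destructive
# command could guard itself like this; the operation text is made up.
def _ExampleConfirmShutdown(names):
  """Asks for confirmation before shutting down the given instances.

  """
  if not ConfirmOperation(names, "instances", "shutdown"):
    return constants.EXIT_FAILURE
  return constants.EXIT_SUCCESS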
3903

    
3904

    
3905
def _MaybeParseUnit(elements):
3906
  """Parses and returns an array of potential values with units.
3907

3908
  """
3909
  parsed = {}
3910
  for k, v in elements.items():
3911
    if v == constants.VALUE_DEFAULT:
3912
      parsed[k] = v
3913
    else:
3914
      parsed[k] = utils.ParseUnit(v)
3915
  return parsed
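
# For example, _MaybeParseUnit({"min": "1G", "max": "default"}) returns
# {"min": 1024, "max": "default"}: sizes are converted to mebibytes while the
# special value "default" is passed through unchanged.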
3916

    
3917

    
3918
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3919
                             ispecs_disk_count, ispecs_disk_size,
3920
                             ispecs_nic_count, group_ipolicy, fill_all):
3921
  try:
3922
    if ispecs_mem_size:
3923
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3924
    if ispecs_disk_size:
3925
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3926
  except (TypeError, ValueError, errors.UnitParseError), err:
3927
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3928
                               " in policy: %s" %
3929
                               (ispecs_disk_size, ispecs_mem_size, err),
3930
                               errors.ECODE_INVAL)
3931

    
3932
  # prepare ipolicy dict
3933
  ispecs_transposed = {
3934
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3935
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3936
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3937
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3938
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3939
    }
3940

    
3941
  # first, check that the values given are correct
3942
  if group_ipolicy:
3943
    forced_type = TISPECS_GROUP_TYPES
3944
  else:
3945
    forced_type = TISPECS_CLUSTER_TYPES
3946
  for specs in ispecs_transposed.values():
3947
    assert type(specs) is dict
3948
    utils.ForceDictType(specs, forced_type)
3949

    
3950
  # then transpose
3951
  ispecs = {
3952
    constants.ISPECS_MIN: {},
3953
    constants.ISPECS_MAX: {},
3954
    constants.ISPECS_STD: {},
3955
    }
3956
  for (name, specs) in ispecs_transposed.iteritems():
3957
    assert name in constants.ISPECS_PARAMETERS
3958
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3959
      assert key in ispecs
3960
      ispecs[key][name] = val
3961
  minmax_out = {}
3962
  for key in constants.ISPECS_MINMAX_KEYS:
3963
    if fill_all:
3964
      minmax_out[key] = \
3965
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3966
    else:
3967
      minmax_out[key] = ispecs[key]
3968
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3969
  if fill_all:
3970
    ipolicy[constants.ISPECS_STD] = \
3971
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3972
                         ispecs[constants.ISPECS_STD])
3973
  else:
3974
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3975

    
3976

    
3977
def _ParseSpecUnit(spec, keyname):
3978
  ret = spec.copy()
3979
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3980
    if k in ret:
3981
      try:
3982
        ret[k] = utils.ParseUnit(ret[k])
3983
      except (TypeError, ValueError, errors.UnitParseError), err:
3984
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3985
                                    " specs: %s" % (k, ret[k], keyname, err)),
3986
                                   errors.ECODE_INVAL)
3987
  return ret
3988

    
3989

    
3990
def _ParseISpec(spec, keyname, required):
3991
  ret = _ParseSpecUnit(spec, keyname)
3992
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
3993
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
3994
  if required and missing:
3995
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
3996
                               (keyname, utils.CommaJoin(missing)),
3997
                               errors.ECODE_INVAL)
3998
  return ret
3999

    
4000

    
4001
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
4002
  ret = None
4003
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
4004
      len(minmax_ispecs[0]) == 1):
4005
    for (key, spec) in minmax_ispecs[0].items():
4006
      # This loop is executed exactly once
4007
      if key in allowed_values and not spec:
4008
        ret = key
4009
  return ret
4010

    
4011

    
4012
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4013
                            group_ipolicy, allowed_values):
4014
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
4015
  if found_allowed is not None:
4016
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
4017
  elif minmax_ispecs is not None:
4018
    minmax_out = []
4019
    for mmpair in minmax_ispecs:
4020
      mmpair_out = {}
4021
      for (key, spec) in mmpair.items():
4022
        if key not in constants.ISPECS_MINMAX_KEYS:
4023
          msg = "Invalid key in bounds instance specifications: %s" % key
4024
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
4025
        mmpair_out[key] = _ParseISpec(spec, key, True)
4026
      minmax_out.append(mmpair_out)
4027
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4028
  if std_ispecs is not None:
4029
    assert not group_ipolicy # This is not an option for gnt-group
4030
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4031

    
4032

    
4033
def CreateIPolicyFromOpts(ispecs_mem_size=None,
4034
                          ispecs_cpu_count=None,
4035
                          ispecs_disk_count=None,
4036
                          ispecs_disk_size=None,
4037
                          ispecs_nic_count=None,
4038
                          minmax_ispecs=None,
4039
                          std_ispecs=None,
4040
                          ipolicy_disk_templates=None,
4041
                          ipolicy_vcpu_ratio=None,
4042
                          ipolicy_spindle_ratio=None,
4043
                          group_ipolicy=False,
4044
                          allowed_values=None,
4045
                          fill_all=False):
4046
  """Creation of instance policy based on command line options.
4047

4048
  @param fill_all: whether for cluster policies we should ensure that
4049
    all values are filled
4050

4051
  """
4052
  assert not (fill_all and allowed_values)
4053

    
4054
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4055
                 ispecs_disk_size or ispecs_nic_count)
4056
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4057
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4058
                               " together with any --ipolicy-xxx-specs option",
4059
                               errors.ECODE_INVAL)
4060

    
4061
  ipolicy_out = objects.MakeEmptyIPolicy()
4062
  if split_specs:
4063
    assert fill_all
4064
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4065
                             ispecs_disk_count, ispecs_disk_size,
4066
                             ispecs_nic_count, group_ipolicy, fill_all)
4067
  elif (minmax_ispecs is not None or std_ispecs is not None):
4068
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4069
                            group_ipolicy, allowed_values)
4070

    
4071
  if ipolicy_disk_templates is not None:
4072
    if allowed_values and ipolicy_disk_templates in allowed_values:
4073
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4074
    else:
4075
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4076
  if ipolicy_vcpu_ratio is not None:
4077
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4078
  if ipolicy_spindle_ratio is not None:
4079
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4080

    
4081
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4082

    
4083
  if not group_ipolicy and fill_all:
4084
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4085

    
4086
  return ipolicy_out
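

# Illustrative sketch only (not part of the original module): builds a cluster
# policy from a standard-specs dictionary and a vcpu ratio; the parameter
# values are made up.
def _ExampleCreateIPolicy():
  """Returns a fully filled cluster instance policy.

  """
  return CreateIPolicyFromOpts(std_ispecs={constants.ISPEC_CPU_COUNT: 2},
                               ipolicy_vcpu_ratio=4.0,
                               fill_all=True)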
4087

    
4088

    
4089
def _SerializeGenericInfo(buf, data, level, afterkey=False):
4090
  """Formatting core of L{PrintGenericInfo}.
4091

4092
  @param buf: (string) stream to accumulate the result into
4093
  @param data: data to format
4094
  @type level: int
4095
  @param level: depth in the data hierarchy, used for indenting
4096
  @type afterkey: bool
4097
  @param afterkey: True when we are in the middle of a line after a key (used
4098
      to properly add newlines or indentation)
4099

4100
  """
4101
  baseind = "  "
4102
  if isinstance(data, dict):
4103
    if not data:
4104
      buf.write("\n")
4105
    else:
4106
      if afterkey:
4107
        buf.write("\n")
4108
        doindent = True
4109
      else:
4110
        doindent = False
4111
      for key in sorted(data):
4112
        if doindent:
4113
          buf.write(baseind * level)
4114
        else:
4115
          doindent = True
4116
        buf.write(key)
4117
        buf.write(": ")
4118
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4119
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4120
    # list of tuples (an ordered dictionary)
4121
    if afterkey:
4122
      buf.write("\n")
4123
      doindent = True
4124
    else:
4125
      doindent = False
4126
    for (key, val) in data:
4127
      if doindent:
4128
        buf.write(baseind * level)
4129
      else:
4130
        doindent = True
4131
      buf.write(key)
4132
      buf.write(": ")
4133
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4134
  elif isinstance(data, list):
4135
    if not data:
4136
      buf.write("\n")
4137
    else:
4138
      if afterkey:
4139
        buf.write("\n")
4140
        doindent = True
4141
      else:
4142
        doindent = False
4143
      for item in data:
4144
        if doindent:
4145
          buf.write(baseind * level)
4146
        else:
4147
          doindent = True
4148
        buf.write("-")
4149
        buf.write(baseind[1:])
4150
        _SerializeGenericInfo(buf, item, level + 1)
4151
  else:
4152
    # This branch should be only taken for strings, but it's practically
4153
    # impossible to guarantee that no other types are produced somewhere
4154
    buf.write(str(data))
4155
    buf.write("\n")
4156

    
4157

    
4158
def PrintGenericInfo(data):
4159
  """Print information formatted according to the hierarchy.
4160

4161
  The output is a valid YAML string.
4162

4163
  @param data: the data to print. It's a hierarchical structure whose elements
4164
      can be:
4165
        - dictionaries, where keys are strings and values are of any of the
4166
          types listed here
4167
        - lists of pairs (key, value), where key is a string and value is of
4168
          any of the types listed here; it's a way to encode ordered
4169
          dictionaries
4170
        - lists of any of the types listed here
4171
        - strings
4172

4173
  """
4174
  buf = StringIO()
4175
  _SerializeGenericInfo(buf, data, 0)
4176
  ToStdout(buf.getvalue().rstrip("\n"))
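

# Illustrative sketch only (not part of the original module): all values are
# made up, but the structure matches what PrintGenericInfo accepts (dicts,
# lists of pairs, plain lists and strings).
def _ExamplePrintGenericInfo():
  """Prints a small, YAML-like hierarchy.

  """
  PrintGenericInfo([
    ("name", "instance1.example.com"),
    ("disks", ["disk/0", "disk/1"]),
    ("nics", [[("mac", "aa:00:00:00:00:01"), ("ip", "192.0.2.10")]]),
    ])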