#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
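

# Illustrative sketch (added for clarity, not part of the original module;
# names are made up): with opts.tag_type set to constants.TAG_NODE and
# args == ["node1.example.com", "mytag"], a call such as
#   _ExtractTagsObject(opts, args)
# would be expected to return (constants.TAG_NODE, "node1.example.com") and
# leave args == ["mytag"], since the object name is popped off the front.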


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  tag object kinds (cluster, node group, node, network, instance). The
  opts argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  tag object kinds (cluster, node group, node, network, instance). The
  opts argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  tag object kinds (cluster, node group, node, network, instance). The
  opts argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
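

# Illustrative note (added for clarity, not part of the original module):
# "unit" option values go through utils.ParseUnit, so a bare number is
# presumably taken as MiB while suffixed values are scaled (for example,
# something like ParseUnit("4g") -> 4096); parse failures are re-raised as
# OptionValueError so that optparse reports them as normal option errors.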


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
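

# Illustrative example (added for clarity, not part of the original module;
# the keys are made up): given the prefix handling above, a call like
#   _SplitKeyVal("-B", "memory=4096,no_auto_balance,-serial_console", True)
# should yield
#   {"memory": "4096", "auto_balance": False, "serial_console": None}
# i.e. "no_"-prefixed keys become False, "-"-prefixed keys become None, and
# plain key=val pairs keep their string values.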


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
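

# Illustrative example (added for clarity, not part of the original module;
# the identifiers are made up):
#   _SplitIdentKeyVal("--disk", "0:size=10G,mode=rw", True)
# should yield ("0", {"size": "10G", "mode": "rw"}), while
#   _SplitIdentKeyVal("--disk", "no_ident", True)
# should yield ("ident", False), marking the whole parameter group for removal.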


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionaries
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
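

# Illustrative example (added for clarity, not part of the original module;
# the identifiers and values are made up): an option of type
# "multilistidentkeyval" given
#   "plain:size=10G/rbd:size=20G//plain:size=5G"
# should parse to
#   [{"plain": {"size": "10G"}, "rbd": {"size": "20G"}},
#    {"plain": {"size": "5G"}}]
# i.e. "//" separates list entries and "/" separates ident sections within one.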
696

    
697

    
698
def check_bool(option, opt, value): # pylint: disable=W0613
699
  """Custom parser for yes/no options.
700

701
  This will store the parsed value as either True or False.
702

703
  """
704
  value = value.lower()
705
  if value == constants.VALUE_FALSE or value == "no":
706
    return False
707
  elif value == constants.VALUE_TRUE or value == "yes":
708
    return True
709
  else:
710
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
711

    
712

    
713
def check_list(option, opt, value): # pylint: disable=W0613
714
  """Custom parser for comma-separated lists.
715

716
  """
717
  # we have to make this explicit check since "".split(",") is [""],
718
  # not an empty list :(
719
  if not value:
720
    return []
721
  else:
722
    return utils.UnescapeAndSplit(value)
723

    
724

    
725
def check_maybefloat(option, opt, value): # pylint: disable=W0613
726
  """Custom parser for float numbers which might be also defaults.
727

728
  """
729
  value = value.lower()
730

    
731
  if value == constants.VALUE_DEFAULT:
732
    return value
733
  else:
734
    return float(value)
735

    
736

    
737
# completion_suggestion is normally a list. Using numeric values not evaluating
738
# to False for dynamic completion.
739
(OPT_COMPL_MANY_NODES,
740
 OPT_COMPL_ONE_NODE,
741
 OPT_COMPL_ONE_INSTANCE,
742
 OPT_COMPL_ONE_OS,
743
 OPT_COMPL_ONE_EXTSTORAGE,
744
 OPT_COMPL_ONE_IALLOCATOR,
745
 OPT_COMPL_ONE_NETWORK,
746
 OPT_COMPL_INST_ADD_NODES,
747
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
748

    
749
OPT_COMPL_ALL = compat.UniqueFrozenset([
750
  OPT_COMPL_MANY_NODES,
751
  OPT_COMPL_ONE_NODE,
752
  OPT_COMPL_ONE_INSTANCE,
753
  OPT_COMPL_ONE_OS,
754
  OPT_COMPL_ONE_EXTSTORAGE,
755
  OPT_COMPL_ONE_IALLOCATOR,
756
  OPT_COMPL_ONE_NETWORK,
757
  OPT_COMPL_INST_ADD_NODES,
758
  OPT_COMPL_ONE_NODEGROUP,
759
  ])
760

    
761

    
762
class CliOption(Option):
763
  """Custom option class for optparse.
764

765
  """
766
  ATTRS = Option.ATTRS + [
767
    "completion_suggest",
768
    ]
769
  TYPES = Option.TYPES + (
770
    "multilistidentkeyval",
771
    "identkeyval",
772
    "keyval",
773
    "unit",
774
    "bool",
775
    "list",
776
    "maybefloat",
777
    )
778
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
779
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
780
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
781
  TYPE_CHECKER["keyval"] = check_key_val
782
  TYPE_CHECKER["unit"] = check_unit
783
  TYPE_CHECKER["bool"] = check_bool
784
  TYPE_CHECKER["list"] = check_list
785
  TYPE_CHECKER["maybefloat"] = check_maybefloat
786

    
787

    
788
# optparse.py sets make_option, so we do it for our own option class, too
789
cli_option = CliOption
790

    
791

    
792
_YORNO = "yes|no"
793

    
794
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
795
                       help="Increase debugging level")
796

    
797
NOHDR_OPT = cli_option("--no-headers", default=False,
798
                       action="store_true", dest="no_headers",
799
                       help="Don't display column headers")
800

    
801
SEP_OPT = cli_option("--separator", default=None,
802
                     action="store", dest="separator",
803
                     help=("Separator between output fields"
804
                           " (defaults to one space)"))
805

    
806
USEUNITS_OPT = cli_option("--units", default=None,
807
                          dest="units", choices=("h", "m", "g", "t"),
808
                          help="Specify units for output (one of h/m/g/t)")
809

    
810
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
811
                        type="string", metavar="FIELDS",
812
                        help="Comma separated list of output fields")
813

    
814
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
815
                       default=False, help="Force the operation")
816

    
817
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
818
                         default=False, help="Do not require confirmation")
819

    
820
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
821
                                  action="store_true", default=False,
822
                                  help=("Ignore offline nodes and do as much"
823
                                        " as possible"))
824

    
825
TAG_ADD_OPT = cli_option("--tags", dest="tags",
826
                         default=None, help="Comma-separated list of instance"
827
                                            " tags")
828

    
829
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
830
                         default=None, help="File with tag names")
831

    
832
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
833
                        default=False, action="store_true",
834
                        help=("Submit the job and return the job ID, but"
835
                              " don't wait for the job to finish"))
836

    
837
PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
838
                             default=False, action="store_true",
839
                             help=("Additionally print the job as first line"
840
                                   " on stdout (for scripting)."))
841

    
842
SYNC_OPT = cli_option("--sync", dest="do_locking",
843
                      default=False, action="store_true",
844
                      help=("Grab locks while doing the queries"
845
                            " in order to ensure more consistent results"))
846

    
847
DRY_RUN_OPT = cli_option("--dry-run", default=False,
848
                         action="store_true",
849
                         help=("Do not execute the operation, just run the"
850
                               " check steps and verify if it could be"
851
                               " executed"))
852

    
853
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
854
                         action="store_true",
855
                         help="Increase the verbosity of the operation")
856

    
857
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
858
                              action="store_true", dest="simulate_errors",
859
                              help="Debugging option that makes the operation"
860
                              " treat most runtime checks as failed")
861

    
862
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
863
                        default=True, action="store_false",
864
                        help="Don't wait for sync (DANGEROUS!)")
865

    
866
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
867
                        default=False, action="store_true",
868
                        help="Wait for disks to sync")
869

    
870
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
871
                             action="store_true", default=False,
872
                             help="Enable offline instance")
873

    
874
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
875
                              action="store_true", default=False,
876
                              help="Disable down instance")
877

    
878
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
879
                               help=("Custom disk setup (%s)" %
880
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
881
                               default=None, metavar="TEMPL",
882
                               choices=list(constants.DISK_TEMPLATES))
883

    
884
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
885
                        help="Do not create any network cards for"
886
                        " the instance")
887

    
888
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
889
                               help="Relative path under default cluster-wide"
890
                               " file storage dir to store file-based disks",
891
                               default=None, metavar="<DIR>")
892

    
893
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
894
                                  help="Driver to use for image files",
895
                                  default="loop", metavar="<DRIVER>",
896
                                  choices=list(constants.FILE_DRIVER))
897

    
898
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
899
                            help="Select nodes for the instance automatically"
900
                            " using the <NAME> iallocator plugin",
901
                            default=None, type="string",
902
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
903

    
904
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
905
                                    metavar="<NAME>",
906
                                    help="Set the default instance"
907
                                    " allocator plugin",
908
                                    default=None, type="string",
909
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
910

    
911
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
912
                    metavar="<os>",
913
                    completion_suggest=OPT_COMPL_ONE_OS)
914

    
915
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
916
                          type="keyval", default={},
917
                          help="OS parameters")
918

    
919
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
920
                               action="store_true", default=False,
921
                               help="Force an unknown variant")
922

    
923
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
924
                            action="store_true", default=False,
925
                            help="Do not install the OS (will"
926
                            " enable no-start)")
927

    
928
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
929
                                dest="allow_runtime_chgs",
930
                                default=True, action="store_false",
931
                                help="Don't allow runtime changes")
932

    
933
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
934
                         type="keyval", default={},
935
                         help="Backend parameters")
936

    
937
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
938
                        default={}, dest="hvparams",
939
                        help="Hypervisor parameters")
940

    
941
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
942
                             help="Disk template parameters, in the format"
943
                             " template:option=value,option=value,...",
944
                             type="identkeyval", action="append", default=[])
945

    
946
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
947
                                 type="keyval", default={},
948
                                 help="Memory size specs: list of key=value,"
949
                                " where key is one of min, max, std"
950
                                 " (in MB or using a unit)")
951

    
952
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
953
                                 type="keyval", default={},
954
                                 help="CPU count specs: list of key=value,"
955
                                 " where key is one of min, max, std")
956

    
957
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
958
                                  dest="ispecs_disk_count",
959
                                  type="keyval", default={},
960
                                  help="Disk count specs: list of key=value,"
961
                                  " where key is one of min, max, std")
962

    
963
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
964
                                 type="keyval", default={},
965
                                 help="Disk size specs: list of key=value,"
966
                                 " where key is one of min, max, std"
967
                                 " (in MB or using a unit)")
968

    
969
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
970
                                 type="keyval", default={},
971
                                 help="NIC count specs: list of key=value,"
972
                                 " where key is one of min, max, std")
973

    
974
IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
975
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
976
                                      dest="ipolicy_bounds_specs",
977
                                      type="multilistidentkeyval", default=None,
978
                                      help="Complete instance specs limits")
979

    
980
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
981
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
982
                                   dest="ipolicy_std_specs",
983
                                   type="keyval", default=None,
984
                                   help="Complte standard instance specs")
985

    
986
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
987
                                    dest="ipolicy_disk_templates",
988
                                    type="list", default=None,
989
                                    help="Comma-separated list of"
990
                                    " enabled disk templates")
991

    
992
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
993
                                 dest="ipolicy_vcpu_ratio",
994
                                 type="maybefloat", default=None,
995
                                 help="The maximum allowed vcpu-to-cpu ratio")
996

    
997
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
998
                                   dest="ipolicy_spindle_ratio",
999
                                   type="maybefloat", default=None,
1000
                                   help=("The maximum allowed instances to"
1001
                                         " spindle ratio"))
1002

    
1003
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
1004
                            help="Hypervisor and hypervisor options, in the"
1005
                            " format hypervisor:option=value,option=value,...",
1006
                            default=None, type="identkeyval")
1007

    
1008
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1009
                        help="Hypervisor and hypervisor options, in the"
1010
                        " format hypervisor:option=value,option=value,...",
1011
                        default=[], action="append", type="identkeyval")
1012

    
1013
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1014
                           action="store_false",
1015
                           help="Don't check that the instance's IP"
1016
                           " is alive")
1017

    
1018
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1019
                             default=True, action="store_false",
1020
                             help="Don't check that the instance's name"
1021
                             " is resolvable")
1022

    
1023
NET_OPT = cli_option("--net",
1024
                     help="NIC parameters", default=[],
1025
                     dest="nics", action="append", type="identkeyval")
1026

    
1027
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1028
                      dest="disks", action="append", type="identkeyval")
1029

    
1030
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1031
                         help="Comma-separated list of disks"
1032
                         " indices to act on (e.g. 0,2) (optional,"
1033
                         " defaults to all disks)")
1034

    
1035
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1036
                         help="Enforces a single-disk configuration using the"
1037
                         " given disk size, in MiB unless a suffix is used",
1038
                         default=None, type="unit", metavar="<size>")
1039

    
1040
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1041
                                dest="ignore_consistency",
1042
                                action="store_true", default=False,
1043
                                help="Ignore the consistency of the disks on"
1044
                                " the secondary")
1045

    
1046
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1047
                                dest="allow_failover",
1048
                                action="store_true", default=False,
1049
                                help="If migration is not possible fallback to"
1050
                                     " failover")
1051

    
1052
NONLIVE_OPT = cli_option("--non-live", dest="live",
1053
                         default=True, action="store_false",
1054
                         help="Do a non-live migration (this usually means"
1055
                         " freeze the instance, save the state, transfer and"
1056
                         " only then resume running on the secondary node)")
1057

    
1058
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1059
                                default=None,
1060
                                choices=list(constants.HT_MIGRATION_MODES),
1061
                                help="Override default migration mode (choose"
1062
                                " either live or non-live")
1063

    
1064
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1065
                                help="Target node and optional secondary node",
1066
                                metavar="<pnode>[:<snode>]",
1067
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
1068

    
1069
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1070
                           action="append", metavar="<node>",
1071
                           help="Use only this node (can be used multiple"
1072
                           " times, if not given defaults to all nodes)",
1073
                           completion_suggest=OPT_COMPL_ONE_NODE)
1074

    
1075
NODEGROUP_OPT_NAME = "--node-group"
1076
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1077
                           dest="nodegroup",
1078
                           help="Node group (name or uuid)",
1079
                           metavar="<nodegroup>",
1080
                           default=None, type="string",
1081
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1082

    
1083
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1084
                             metavar="<node>",
1085
                             completion_suggest=OPT_COMPL_ONE_NODE)
1086

    
1087
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1088
                         action="store_false",
1089
                         help="Don't start the instance after creation")
1090

    
1091
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1092
                         action="store_true", default=False,
1093
                         help="Show command instead of executing it")
1094

    
1095
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1096
                         default=False, action="store_true",
1097
                         help="Instead of performing the migration, try to"
1098
                         " recover from a failed cleanup. This is safe"
1099
                         " to run even if the instance is healthy, but it"
1100
                         " will create extra replication traffic and "
1101
                         " disrupt briefly the replication (like during the"
1102
                         " migration")
1103

    
1104
STATIC_OPT = cli_option("-s", "--static", dest="static",
1105
                        action="store_true", default=False,
1106
                        help="Only show configuration data, not runtime data")
1107

    
1108
ALL_OPT = cli_option("--all", dest="show_all",
1109
                     default=False, action="store_true",
1110
                     help="Show info on all instances on the cluster."
1111
                     " This can take a long time to run, use wisely")
1112

    
1113
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1114
                           action="store_true", default=False,
1115
                           help="Interactive OS reinstall, lists available"
1116
                           " OS templates for selection")
1117

    
1118
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1119
                                 action="store_true", default=False,
1120
                                 help="Remove the instance from the cluster"
1121
                                 " configuration even if there are failures"
1122
                                 " during the removal process")
1123

    
1124
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1125
                                        dest="ignore_remove_failures",
1126
                                        action="store_true", default=False,
1127
                                        help="Remove the instance from the"
1128
                                        " cluster configuration even if there"
1129
                                        " are failures during the removal"
1130
                                        " process")
1131

    
1132
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1133
                                 action="store_true", default=False,
1134
                                 help="Remove the instance from the cluster")
1135

    
1136
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1137
                               help="Specifies the new node for the instance",
1138
                               metavar="NODE", default=None,
1139
                               completion_suggest=OPT_COMPL_ONE_NODE)
1140

    
1141
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1142
                               help="Specifies the new secondary node",
1143
                               metavar="NODE", default=None,
1144
                               completion_suggest=OPT_COMPL_ONE_NODE)
1145

    
1146
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1147
                             help="Specifies the new primary node",
1148
                             metavar="<node>", default=None,
1149
                             completion_suggest=OPT_COMPL_ONE_NODE)
1150

    
1151
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1152
                            default=False, action="store_true",
1153
                            help="Replace the disk(s) on the primary"
1154
                                 " node (applies only to internally mirrored"
1155
                                 " disk templates, e.g. %s)" %
1156
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1157

    
1158
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1159
                              default=False, action="store_true",
1160
                              help="Replace the disk(s) on the secondary"
1161
                                   " node (applies only to internally mirrored"
1162
                                   " disk templates, e.g. %s)" %
1163
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1164

    
1165
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1166
                              default=False, action="store_true",
1167
                              help="Lock all nodes and auto-promote as needed"
1168
                              " to MC status")
1169

    
1170
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1171
                              default=False, action="store_true",
1172
                              help="Automatically replace faulty disks"
1173
                                   " (applies only to internally mirrored"
1174
                                   " disk templates, e.g. %s)" %
1175
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1176

    
1177
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1178
                             default=False, action="store_true",
1179
                             help="Ignore current recorded size"
1180
                             " (useful for forcing activation when"
1181
                             " the recorded size is wrong)")
1182

    
1183
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1184
                          metavar="<node>",
1185
                          completion_suggest=OPT_COMPL_ONE_NODE)
1186

    
1187
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1188
                         metavar="<dir>")
1189

    
1190
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1191
                              help="Specify the secondary ip for the node",
1192
                              metavar="ADDRESS", default=None)
1193

    
1194
READD_OPT = cli_option("--readd", dest="readd",
1195
                       default=False, action="store_true",
1196
                       help="Readd old node after replacing it")
1197

    
1198
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1199
                                default=True, action="store_false",
1200
                                help="Disable SSH key fingerprint checking")
1201

    
1202
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1203
                                 default=False, action="store_true",
1204
                                 help="Force the joining of a node")
1205

    
1206
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1207
                    type="bool", default=None, metavar=_YORNO,
1208
                    help="Set the master_candidate flag on the node")
1209

    
1210
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1211
                         type="bool", default=None,
1212
                         help=("Set the offline flag on the node"
1213
                               " (cluster does not communicate with offline"
1214
                               " nodes)"))
1215

    
1216
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1217
                         type="bool", default=None,
1218
                         help=("Set the drained flag on the node"
1219
                               " (excluded from allocation operations)"))
1220

    
1221
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1222
                              type="bool", default=None, metavar=_YORNO,
1223
                              help="Set the master_capable flag on the node")
1224

    
1225
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1226
                          type="bool", default=None, metavar=_YORNO,
1227
                          help="Set the vm_capable flag on the node")
1228

    
1229
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1230
                             type="bool", default=None, metavar=_YORNO,
1231
                             help="Set the allocatable flag on a volume")
1232

    
1233
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1234
                               help="Disable support for lvm based instances"
1235
                               " (cluster-wide)",
1236
                               action="store_false", default=True)
1237

    
1238
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1239
                            dest="enabled_hypervisors",
1240
                            help="Comma-separated list of hypervisors",
1241
                            type="string", default=None)
1242

    
1243
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1244
                                        dest="enabled_disk_templates",
1245
                                        help="Comma-separated list of "
1246
                                             "disk templates",
1247
                                        type="string", default=None)
1248

    
1249
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1250
                            type="keyval", default={},
1251
                            help="NIC parameters")
1252

    
1253
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1254
                         dest="candidate_pool_size", type="int",
1255
                         help="Set the candidate pool size")
1256

    
1257
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1258
                         help=("Enables LVM and specifies the volume group"
1259
                               " name (cluster-wide) for disk allocation"
1260
                               " [%s]" % constants.DEFAULT_VG),
1261
                         metavar="VG", default=None)
1262

    
1263
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1264
                          help="Destroy cluster", action="store_true")
1265

    
1266
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1267
                          help="Skip node agreement check (dangerous)",
1268
                          action="store_true", default=False)
1269

    
1270
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1271
                            help="Specify the mac prefix for the instance IP"
1272
                            " addresses, in the format XX:XX:XX",
1273
                            metavar="PREFIX",
1274
                            default=None)
1275

    
1276
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1277
                               help="Specify the node interface (cluster-wide)"
1278
                               " on which the master IP address will be added"
1279
                               " (cluster init default: %s)" %
1280
                               constants.DEFAULT_BRIDGE,
1281
                               metavar="NETDEV",
1282
                               default=None)
1283

    
1284
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1285
                                help="Specify the netmask of the master IP",
1286
                                metavar="NETMASK",
1287
                                default=None)
1288

    
1289
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1290
                                     dest="use_external_mip_script",
1291
                                     help="Specify whether to run a"
1292
                                     " user-provided script for the master"
1293
                                     " IP address turnup and"
1294
                                     " turndown operations",
1295
                                     type="bool", metavar=_YORNO, default=None)
1296

    
1297
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1298
                                help="Specify the default directory (cluster-"
1299
                                "wide) for storing the file-based disks [%s]" %
1300
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1301
                                metavar="DIR",
1302
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1303

    
1304
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1305
  "--shared-file-storage-dir",
1306
  dest="shared_file_storage_dir",
1307
  help="Specify the default directory (cluster-wide) for storing the"
1308
  " shared file-based disks [%s]" %
1309
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1310
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1311

    
1312
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1313
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1314
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as a filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

SUBMIT_OPTS = [
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  ]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]


class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error


class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  """


def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s   %s" % (mlen, "", line)

  yield ""


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
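
# For instance (illustrative values):
#   SplitNodeOption("node1:node2") -> ["node1", "node2"]
#   SplitNodeOption("node1") -> ("node1", None)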


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
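
# For instance (illustrative values):
#   CalculateOSNames("debootstrap", ["wheezy", "jessie"])
#   -> ["debootstrap+wheezy", "debootstrap+jessie"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]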


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
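
# For instance (illustrative values), with default fields ["name", "status"]:
#   ParseFields(None, ["name", "status"]) -> ["name", "status"]
#   ParseFields("+uuid", ["name", "status"]) -> ["name", "status", "uuid"]
#   ParseFields("name,uuid", ["name", "status"]) -> ["name", "uuid"]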


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
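
# Typical use (illustrative):
#   if not AskUser("Continue with the operation?"):
#     return 1
# or with custom choices, as described in the docstring above:
#   choice = AskUser("Continue, skip or abort?",
#                    [("c", "continue", "Continue anyway"),
#                     ("s", "skip", "Skip this step"),
#                     ("a", "abort", "Abort")])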


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
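
# Typical use (illustrative; assumes `op` is an already-built opcode):
#   job_id = SendJob([op])
#   ToStdout("%d" % job_id)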


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  if hasattr(opts, "print_jobid") and opts.print_jobid:
    ToStdout("%d" % job_id)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    if opts.print_jobid:
      ToStdout("%d" % job_id)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)


def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail.

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail
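
# For instance (illustrative), running "gnt-instance start --reason=maint ..."
# leaves op.reason as a list of (source, reason, timestamp) tuples, roughly:
#   [(constants.OPCODE_REASON_SRC_USER, "maint", <nanoseconds>),
#    ("<OPCODE_REASON_SRC_CLIENT>:gnt-instance", "start", <nanoseconds>)]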


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)


def GetClient(query=False):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
  if override_socket:
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
      address = pathutils.MASTER_SOCKET
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
      address = pathutils.QUERY_SOCKET
    else:
      address = override_socket
  elif query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    address = None
  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")


def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
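
# For instance (illustrative values, assuming "link" is a valid NIC parameter):
#   ParseNicOption([("0", {"link": "br0"}), ("2", {})])
#   -> [{"link": "br0"}, {}, {}]   (three NICs, NIC 1 left at defaults)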


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if constants.IDISK_SPINDLES in ddict:
          raise errors.OpPrereqError("spindles is not a valid option when"
                                     " adopting a disk", errors.ECODE_INVAL)
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0
2737

    
2738

    
2739
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


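# Illustrative usage sketch (editor's addition, not part of the original
# module): the callback passed to RunWhileClusterStopped receives the helper
# instance as its first argument, so it can run commands on the stopped
# cluster through the helper's _RunCmd method. The callback and command below
# are invented for the example.
#
#   def _WhileStopped(helper):
#     # Runs on the master node while all Ganeti daemons are stopped
#     helper._RunCmd(None, ["hostname"])
#
#   RunWhileClusterStopped(ToStdout, _WhileStopped)

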
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Generates a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @type separator: string or None
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


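# Illustrative usage sketch (editor's addition; the field names and data are
# invented): with separator=None the 'smart' layout is used, "numfields" are
# right-aligned and "unitfields" are scaled with utils.FormatUnit.
#
#   headers = {"name": "Node", "dfree": "DFree"}
#   fields = ["name", "dfree"]
#   data = [["node1.example.com", 1024], ["node2.example.com", 20480]]
#   for line in GenerateTable(headers, fields, None, data,
#                             numfields=["dfree"], unitfields=["dfree"]):
#     ToStdout(line)

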
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


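# Illustrative sketch (editor's addition): entries in the "override"
# dictionary follow the same (callback, align-right) convention as
# _DEFAULT_FORMAT_QUERY; the field name below is only an example.
#
#   fmtoverride = {
#     "tags": (lambda value: ",".join(value), False),
#     }
#   (fn, align_right) = _GetColumnFormatter(fdef, fmtoverride, "h")

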
class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


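# Illustrative sketch (editor's addition) of how a list command might call
# GenericList; the option attributes are assumed to come from the usual list
# options and are not defined here.
#
#   return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"],
#                      args, opts.units, opts.separator, not opts.no_headers,
#                      verbose=opts.verbose, namefield="name")

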
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


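# Illustrative usage sketch (editor's addition): a column is described by a
# title, a formatting callback and an alignment flag; FormatTable returns the
# finished lines. The data below is invented.
#
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True),
#     ]
#   for line in FormatTable([["disk0", 1024]], columns, True, None):
#     ToStdout(line)

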
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


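# Editor's note with worked examples, based on the suffix map above:
#   ParseTimespec("30") == 30
#   ParseTimespec("2h") == 7200
#   ParseTimespec("1w") == 604800
# while ParseTimespec("h") (suffix only) and ParseTimespec("2x") (unknown
# suffix) raise errors.OpPrereqError.

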
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  any offline nodes that have been skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


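# Illustrative usage sketch (editor's addition; op1 and op2 stand for opcodes
# built elsewhere): jobs are queued first, submitted together and then polled
# until all results are in.
#
#   jex = JobExecutor(opts=opts)
#   jex.QueueJob("instance1.example.com", op1)
#   jex.QueueJob("instance2.example.com", op2)
#   for (success, result) in jex.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)

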
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the object's own parameters, without inherited defaults
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


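# Editor's note with a worked example: parameters that are not explicitly set
# show up with their effective default, e.g.
#   FormatParamsDictInfo({"mem": "512"}, {"mem": "512", "vcpus": 2})
# returns {"mem": "512", "vcpus": "default (2)"}.

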
def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def _PrintSpecsParameters(buf, specs):
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
  buf.write(",".join(values))


def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


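# Illustrative sketch (editor's addition): a command would typically abort
# early when the user does not confirm, e.g.
#   if not ConfirmOperation(["inst1", "inst2"], "instances", "shutdown"):
#     return constants.EXIT_FAILURE

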
def _MaybeParseUnit(elements):
  """Parses and returns a dict of values that may carry units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, required):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret


def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  ret = None
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
      len(minmax_ispecs[0]) == 1):
    for (key, spec) in minmax_ispecs[0].items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


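# Illustrative sketch (editor's addition): minmax_ispecs is a list of
# {"min": ..., "max": ...} dictionaries whose values are spec dicts keyed by
# the ISPEC_* parameter names; memory and disk sizes may carry units. The
# concrete values below are invented.
#
#   CreateIPolicyFromOpts(
#     minmax_ispecs=[{
#       "min": {"memory-size": "128M", "cpu-count": 1, "disk-count": 1,
#               "disk-size": "1G", "nic-count": 1, "spindle-use": 1},
#       "max": {"memory-size": "32G", "cpu-count": 8, "disk-count": 16,
#               "disk-size": "512G", "nic-count": 8, "spindle-use": 12},
#       }],
#     fill_all=True)

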
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
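

# Illustrative sketch (editor's addition): lists of (key, value) pairs keep
# their order in the output, so
#
#   PrintGenericInfo([
#     ("name", "instance1.example.com"),
#     ("nics", [
#       [("mac", "aa:00:00:00:00:01"), ("ip", "192.0.2.10")],
#       ]),
#     ])
#
# prints an indented, YAML-like rendering of the structure.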