Statistics
| Branch: | Tag: | Revision:

root / lib / cli.py @ d2d3935a

History | View | Annotate | Download (132.6 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Module dealing with command line parsing"""
23

    
24

    
25
import sys
26
import textwrap
27
import os.path
28
import time
29
import logging
30
import errno
31
import itertools
32
import shlex
33
from cStringIO import StringIO
34

    
35
from ganeti import utils
36
from ganeti import errors
37
from ganeti import constants
38
from ganeti import opcodes
39
from ganeti import luxi
40
from ganeti import ssconf
41
from ganeti import rpc
42
from ganeti import ssh
43
from ganeti import compat
44
from ganeti import netutils
45
from ganeti import qlang
46
from ganeti import objects
47
from ganeti import pathutils
48

    
49
from optparse import (OptionParser, TitledHelpFormatter,
50
                      Option, OptionValueError)
51

    
52

    
53
__all__ = [
54
  # Command line options
55
  "ABSOLUTE_OPT",
56
  "ADD_UIDS_OPT",
57
  "ADD_RESERVED_IPS_OPT",
58
  "ALLOCATABLE_OPT",
59
  "ALLOC_POLICY_OPT",
60
  "ALL_OPT",
61
  "ALLOW_FAILOVER_OPT",
62
  "AUTO_PROMOTE_OPT",
63
  "AUTO_REPLACE_OPT",
64
  "BACKEND_OPT",
65
  "BLK_OS_OPT",
66
  "CAPAB_MASTER_OPT",
67
  "CAPAB_VM_OPT",
68
  "CLEANUP_OPT",
69
  "CLUSTER_DOMAIN_SECRET_OPT",
70
  "CONFIRM_OPT",
71
  "CP_SIZE_OPT",
72
  "DEBUG_OPT",
73
  "DEBUG_SIMERR_OPT",
74
  "DISKIDX_OPT",
75
  "DISK_OPT",
76
  "DISK_PARAMS_OPT",
77
  "DISK_TEMPLATE_OPT",
78
  "DRAINED_OPT",
79
  "DRY_RUN_OPT",
80
  "DRBD_HELPER_OPT",
81
  "DST_NODE_OPT",
82
  "EARLY_RELEASE_OPT",
83
  "ENABLED_HV_OPT",
84
  "ENABLED_DISK_TEMPLATES_OPT",
85
  "ERROR_CODES_OPT",
86
  "FAILURE_ONLY_OPT",
87
  "FIELDS_OPT",
88
  "FILESTORE_DIR_OPT",
89
  "FILESTORE_DRIVER_OPT",
90
  "FORCE_FILTER_OPT",
91
  "FORCE_OPT",
92
  "FORCE_VARIANT_OPT",
93
  "GATEWAY_OPT",
94
  "GATEWAY6_OPT",
95
  "GLOBAL_FILEDIR_OPT",
96
  "HID_OS_OPT",
97
  "GLOBAL_SHARED_FILEDIR_OPT",
98
  "HVLIST_OPT",
99
  "HVOPTS_OPT",
100
  "HYPERVISOR_OPT",
101
  "IALLOCATOR_OPT",
102
  "DEFAULT_IALLOCATOR_OPT",
103
  "IDENTIFY_DEFAULTS_OPT",
104
  "IGNORE_CONSIST_OPT",
105
  "IGNORE_ERRORS_OPT",
106
  "IGNORE_FAILURES_OPT",
107
  "IGNORE_OFFLINE_OPT",
108
  "IGNORE_REMOVE_FAILURES_OPT",
109
  "IGNORE_SECONDARIES_OPT",
110
  "IGNORE_SIZE_OPT",
111
  "INTERVAL_OPT",
112
  "MAC_PREFIX_OPT",
113
  "MAINTAIN_NODE_HEALTH_OPT",
114
  "MASTER_NETDEV_OPT",
115
  "MASTER_NETMASK_OPT",
116
  "MC_OPT",
117
  "MIGRATION_MODE_OPT",
118
  "NET_OPT",
119
  "NETWORK_OPT",
120
  "NETWORK6_OPT",
121
  "NEW_CLUSTER_CERT_OPT",
122
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
123
  "NEW_CONFD_HMAC_KEY_OPT",
124
  "NEW_RAPI_CERT_OPT",
125
  "NEW_PRIMARY_OPT",
126
  "NEW_SECONDARY_OPT",
127
  "NEW_SPICE_CERT_OPT",
128
  "NIC_PARAMS_OPT",
129
  "NOCONFLICTSCHECK_OPT",
130
  "NODE_FORCE_JOIN_OPT",
131
  "NODE_LIST_OPT",
132
  "NODE_PLACEMENT_OPT",
133
  "NODEGROUP_OPT",
134
  "NODE_PARAMS_OPT",
135
  "NODE_POWERED_OPT",
136
  "NODRBD_STORAGE_OPT",
137
  "NOHDR_OPT",
138
  "NOIPCHECK_OPT",
139
  "NO_INSTALL_OPT",
140
  "NONAMECHECK_OPT",
141
  "NOLVM_STORAGE_OPT",
142
  "NOMODIFY_ETCHOSTS_OPT",
143
  "NOMODIFY_SSH_SETUP_OPT",
144
  "NONICS_OPT",
145
  "NONLIVE_OPT",
146
  "NONPLUS1_OPT",
147
  "NORUNTIME_CHGS_OPT",
148
  "NOSHUTDOWN_OPT",
149
  "NOSTART_OPT",
150
  "NOSSH_KEYCHECK_OPT",
151
  "NOVOTING_OPT",
152
  "NO_REMEMBER_OPT",
153
  "NWSYNC_OPT",
154
  "OFFLINE_INST_OPT",
155
  "ONLINE_INST_OPT",
156
  "ON_PRIMARY_OPT",
157
  "ON_SECONDARY_OPT",
158
  "OFFLINE_OPT",
159
  "OSPARAMS_OPT",
160
  "OS_OPT",
161
  "OS_SIZE_OPT",
162
  "OOB_TIMEOUT_OPT",
163
  "POWER_DELAY_OPT",
164
  "PREALLOC_WIPE_DISKS_OPT",
165
  "PRIMARY_IP_VERSION_OPT",
166
  "PRIMARY_ONLY_OPT",
167
  "PRIORITY_OPT",
168
  "RAPI_CERT_OPT",
169
  "READD_OPT",
170
  "REASON_OPT",
171
  "REBOOT_TYPE_OPT",
172
  "REMOVE_INSTANCE_OPT",
173
  "REMOVE_RESERVED_IPS_OPT",
174
  "REMOVE_UIDS_OPT",
175
  "RESERVED_LVS_OPT",
176
  "RUNTIME_MEM_OPT",
177
  "ROMAN_OPT",
178
  "SECONDARY_IP_OPT",
179
  "SECONDARY_ONLY_OPT",
180
  "SELECT_OS_OPT",
181
  "SEP_OPT",
182
  "SHOWCMD_OPT",
183
  "SHOW_MACHINE_OPT",
184
  "SHUTDOWN_TIMEOUT_OPT",
185
  "SINGLE_NODE_OPT",
186
  "SPECS_CPU_COUNT_OPT",
187
  "SPECS_DISK_COUNT_OPT",
188
  "SPECS_DISK_SIZE_OPT",
189
  "SPECS_MEM_SIZE_OPT",
190
  "SPECS_NIC_COUNT_OPT",
191
  "IPOLICY_STD_SPECS_OPT",
192
  "IPOLICY_DISK_TEMPLATES",
193
  "IPOLICY_VCPU_RATIO",
194
  "SPICE_CACERT_OPT",
195
  "SPICE_CERT_OPT",
196
  "SRC_DIR_OPT",
197
  "SRC_NODE_OPT",
198
  "SUBMIT_OPT",
199
  "STARTUP_PAUSED_OPT",
200
  "STATIC_OPT",
201
  "SYNC_OPT",
202
  "TAG_ADD_OPT",
203
  "TAG_SRC_OPT",
204
  "TIMEOUT_OPT",
205
  "TO_GROUP_OPT",
206
  "UIDPOOL_OPT",
207
  "USEUNITS_OPT",
208
  "USE_EXTERNAL_MIP_SCRIPT",
209
  "USE_REPL_NET_OPT",
210
  "VERBOSE_OPT",
211
  "VG_NAME_OPT",
212
  "WFSYNC_OPT",
213
  "YES_DOIT_OPT",
214
  "DISK_STATE_OPT",
215
  "HV_STATE_OPT",
216
  "IGNORE_IPOLICY_OPT",
217
  "INSTANCE_POLICY_OPTS",
218
  # Generic functions for CLI programs
219
  "ConfirmOperation",
220
  "CreateIPolicyFromOpts",
221
  "GenericMain",
222
  "GenericInstanceCreate",
223
  "GenericList",
224
  "GenericListFields",
225
  "GetClient",
226
  "GetOnlineNodes",
227
  "JobExecutor",
228
  "JobSubmittedException",
229
  "ParseTimespec",
230
  "RunWhileClusterStopped",
231
  "SubmitOpCode",
232
  "SubmitOrSend",
233
  "UsesRPC",
234
  # Formatting functions
235
  "ToStderr", "ToStdout",
236
  "FormatError",
237
  "FormatQueryResult",
238
  "FormatParamsDictInfo",
239
  "FormatPolicyInfo",
240
  "PrintGenericInfo",
241
  "GenerateTable",
242
  "AskUser",
243
  "FormatTimestamp",
244
  "FormatLogMessage",
245
  # Tags functions
246
  "ListTags",
247
  "AddTags",
248
  "RemoveTags",
249
  # command line options support infrastructure
250
  "ARGS_MANY_INSTANCES",
251
  "ARGS_MANY_NODES",
252
  "ARGS_MANY_GROUPS",
253
  "ARGS_MANY_NETWORKS",
254
  "ARGS_NONE",
255
  "ARGS_ONE_INSTANCE",
256
  "ARGS_ONE_NODE",
257
  "ARGS_ONE_GROUP",
258
  "ARGS_ONE_OS",
259
  "ARGS_ONE_NETWORK",
260
  "ArgChoice",
261
  "ArgCommand",
262
  "ArgFile",
263
  "ArgGroup",
264
  "ArgHost",
265
  "ArgInstance",
266
  "ArgJobId",
267
  "ArgNetwork",
268
  "ArgNode",
269
  "ArgOs",
270
  "ArgExtStorage",
271
  "ArgSuggest",
272
  "ArgUnknown",
273
  "OPT_COMPL_INST_ADD_NODES",
274
  "OPT_COMPL_MANY_NODES",
275
  "OPT_COMPL_ONE_IALLOCATOR",
276
  "OPT_COMPL_ONE_INSTANCE",
277
  "OPT_COMPL_ONE_NODE",
278
  "OPT_COMPL_ONE_NODEGROUP",
279
  "OPT_COMPL_ONE_NETWORK",
280
  "OPT_COMPL_ONE_OS",
281
  "OPT_COMPL_ONE_EXTSTORAGE",
282
  "cli_option",
283
  "SplitNodeOption",
284
  "CalculateOSNames",
285
  "ParseFields",
286
  "COMMON_CREATE_OPTS",
287
  ]
288

    
289
NO_PREFIX = "no_"
290
UN_PREFIX = "-"
291

    
292
#: Priorities (sorted)
293
_PRIORITY_NAMES = [
294
  ("low", constants.OP_PRIO_LOW),
295
  ("normal", constants.OP_PRIO_NORMAL),
296
  ("high", constants.OP_PRIO_HIGH),
297
  ]
298

    
299
#: Priority dictionary for easier lookup
300
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
301
# we migrate to Python 2.6
302
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
303

    
304
# Query result status for clients
305
(QR_NORMAL,
306
 QR_UNKNOWN,
307
 QR_INCOMPLETE) = range(3)
308

    
309
#: Maximum batch size for ChooseJob
310
_CHOOSE_BATCH = 25
311

    
312

    
313
# constants used to create InstancePolicy dictionary
314
TISPECS_GROUP_TYPES = {
315
  constants.ISPECS_MIN: constants.VTYPE_INT,
316
  constants.ISPECS_MAX: constants.VTYPE_INT,
317
  }
318

    
319
TISPECS_CLUSTER_TYPES = {
320
  constants.ISPECS_MIN: constants.VTYPE_INT,
321
  constants.ISPECS_MAX: constants.VTYPE_INT,
322
  constants.ISPECS_STD: constants.VTYPE_INT,
323
  }
324

    
325
#: User-friendly names for query2 field types
326
_QFT_NAMES = {
327
  constants.QFT_UNKNOWN: "Unknown",
328
  constants.QFT_TEXT: "Text",
329
  constants.QFT_BOOL: "Boolean",
330
  constants.QFT_NUMBER: "Number",
331
  constants.QFT_UNIT: "Storage size",
332
  constants.QFT_TIMESTAMP: "Timestamp",
333
  constants.QFT_OTHER: "Custom",
334
  }
335

    
336

    
337
class _Argument:
338
  def __init__(self, min=0, max=None): # pylint: disable=W0622
339
    self.min = min
340
    self.max = max
341

    
342
  def __repr__(self):
343
    return ("<%s min=%s max=%s>" %
344
            (self.__class__.__name__, self.min, self.max))
345

    
346

    
347
class ArgSuggest(_Argument):
348
  """Suggesting argument.
349

350
  Value can be any of the ones passed to the constructor.
351

352
  """
353
  # pylint: disable=W0622
354
  def __init__(self, min=0, max=None, choices=None):
355
    _Argument.__init__(self, min=min, max=max)
356
    self.choices = choices
357

    
358
  def __repr__(self):
359
    return ("<%s min=%s max=%s choices=%r>" %
360
            (self.__class__.__name__, self.min, self.max, self.choices))
361

    
362

    
363
class ArgChoice(ArgSuggest):
364
  """Choice argument.
365

366
  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
367
  but value must be one of the choices.
368

369
  """
370

    
371

    
372
class ArgUnknown(_Argument):
373
  """Unknown argument to program (e.g. determined at runtime).
374

375
  """
376

    
377

    
378
class ArgInstance(_Argument):
379
  """Instances argument.
380

381
  """
382

    
383

    
384
class ArgNode(_Argument):
385
  """Node argument.
386

387
  """
388

    
389

    
390
class ArgNetwork(_Argument):
391
  """Network argument.
392

393
  """
394

    
395

    
396
class ArgGroup(_Argument):
397
  """Node group argument.
398

399
  """
400

    
401

    
402
class ArgJobId(_Argument):
403
  """Job ID argument.
404

405
  """
406

    
407

    
408
class ArgFile(_Argument):
409
  """File path argument.
410

411
  """
412

    
413

    
414
class ArgCommand(_Argument):
415
  """Command argument.
416

417
  """
418

    
419

    
420
class ArgHost(_Argument):
421
  """Host argument.
422

423
  """
424

    
425

    
426
class ArgOs(_Argument):
427
  """OS argument.
428

429
  """
430

    
431

    
432
class ArgExtStorage(_Argument):
433
  """ExtStorage argument.
434

435
  """
436

    
437

    
438
ARGS_NONE = []
439
ARGS_MANY_INSTANCES = [ArgInstance()]
440
ARGS_MANY_NETWORKS = [ArgNetwork()]
441
ARGS_MANY_NODES = [ArgNode()]
442
ARGS_MANY_GROUPS = [ArgGroup()]
443
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
444
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
445
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
446
# TODO
447
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
448
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
449

    
450

    
451
def _ExtractTagsObject(opts, args):
452
  """Extract the tag type object.
453

454
  Note that this function will modify its args parameter.
455

456
  """
457
  if not hasattr(opts, "tag_type"):
458
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
459
  kind = opts.tag_type
460
  if kind == constants.TAG_CLUSTER:
461
    retval = kind, None
462
  elif kind in (constants.TAG_NODEGROUP,
463
                constants.TAG_NODE,
464
                constants.TAG_NETWORK,
465
                constants.TAG_INSTANCE):
466
    if not args:
467
      raise errors.OpPrereqError("no arguments passed to the command",
468
                                 errors.ECODE_INVAL)
469
    name = args.pop(0)
470
    retval = kind, name
471
  else:
472
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
473
  return retval
474

    
475

    
476
def _ExtendTags(opts, args):
477
  """Extend the args if a source file has been given.
478

479
  This function will extend the tags with the contents of the file
480
  passed in the 'tags_source' attribute of the opts parameter. A file
481
  named '-' will be replaced by stdin.
482

483
  """
484
  fname = opts.tags_source
485
  if fname is None:
486
    return
487
  if fname == "-":
488
    new_fh = sys.stdin
489
  else:
490
    new_fh = open(fname, "r")
491
  new_data = []
492
  try:
493
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
494
    # because of python bug 1633941
495
    while True:
496
      line = new_fh.readline()
497
      if not line:
498
        break
499
      new_data.append(line.strip())
500
  finally:
501
    new_fh.close()
502
  args.extend(new_data)
503

    
504

    
505
def ListTags(opts, args):
506
  """List the tags on a given object.
507

508
  This is a generic implementation that knows how to deal with all
509
  three cases of tag objects (cluster, node, instance). The opts
510
  argument is expected to contain a tag_type field denoting what
511
  object type we work on.
512

513
  """
514
  kind, name = _ExtractTagsObject(opts, args)
515
  cl = GetClient(query=True)
516
  result = cl.QueryTags(kind, name)
517
  result = list(result)
518
  result.sort()
519
  for tag in result:
520
    ToStdout(tag)
521

    
522

    
523
def AddTags(opts, args):
524
  """Add tags on a given object.
525

526
  This is a generic implementation that knows how to deal with all
527
  three cases of tag objects (cluster, node, instance). The opts
528
  argument is expected to contain a tag_type field denoting what
529
  object type we work on.
530

531
  """
532
  kind, name = _ExtractTagsObject(opts, args)
533
  _ExtendTags(opts, args)
534
  if not args:
535
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
536
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
537
  SubmitOrSend(op, opts)
538

    
539

    
540
def RemoveTags(opts, args):
541
  """Remove tags from a given object.
542

543
  This is a generic implementation that knows how to deal with all
544
  three cases of tag objects (cluster, node, instance). The opts
545
  argument is expected to contain a tag_type field denoting what
546
  object type we work on.
547

548
  """
549
  kind, name = _ExtractTagsObject(opts, args)
550
  _ExtendTags(opts, args)
551
  if not args:
552
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
553
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
554
  SubmitOrSend(op, opts)
555

    
556

    
557
def check_unit(option, opt, value): # pylint: disable=W0613
558
  """OptParsers custom converter for units.
559

560
  """
561
  try:
562
    return utils.ParseUnit(value)
563
  except errors.UnitParseError, err:
564
    raise OptionValueError("option %s: %s" % (opt, err))
565

    
566

    
567
def _SplitKeyVal(opt, data, parse_prefixes):
568
  """Convert a KeyVal string into a dict.
569

570
  This function will convert a key=val[,...] string into a dict. Empty
571
  values will be converted specially: keys which have the prefix 'no_'
572
  will have the value=False and the prefix stripped, keys with the prefix
573
  "-" will have value=None and the prefix stripped, and the others will
574
  have value=True.
575

576
  @type opt: string
577
  @param opt: a string holding the option name for which we process the
578
      data, used in building error messages
579
  @type data: string
580
  @param data: a string of the format key=val,key=val,...
581
  @type parse_prefixes: bool
582
  @param parse_prefixes: whether to handle prefixes specially
583
  @rtype: dict
584
  @return: {key=val, key=val}
585
  @raises errors.ParameterError: if there are duplicate keys
586

587
  """
588
  kv_dict = {}
589
  if data:
590
    for elem in utils.UnescapeAndSplit(data, sep=","):
591
      if "=" in elem:
592
        key, val = elem.split("=", 1)
593
      elif parse_prefixes:
594
        if elem.startswith(NO_PREFIX):
595
          key, val = elem[len(NO_PREFIX):], False
596
        elif elem.startswith(UN_PREFIX):
597
          key, val = elem[len(UN_PREFIX):], None
598
        else:
599
          key, val = elem, True
600
      else:
601
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
602
                                    (elem, opt))
603
      if key in kv_dict:
604
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
605
                                    (key, opt))
606
      kv_dict[key] = val
607
  return kv_dict
608

    
609

    
610
def _SplitIdentKeyVal(opt, value, parse_prefixes):
611
  """Helper function to parse "ident:key=val,key=val" options.
612

613
  @type opt: string
614
  @param opt: option name, used in error messages
615
  @type value: string
616
  @param value: expected to be in the format "ident:key=val,key=val,..."
617
  @type parse_prefixes: bool
618
  @param parse_prefixes: whether to handle prefixes specially (see
619
      L{_SplitKeyVal})
620
  @rtype: tuple
621
  @return: (ident, {key=val, key=val})
622
  @raises errors.ParameterError: in case of duplicates or other parsing errors
623

624
  """
625
  if ":" not in value:
626
    ident, rest = value, ""
627
  else:
628
    ident, rest = value.split(":", 1)
629

    
630
  if parse_prefixes and ident.startswith(NO_PREFIX):
631
    if rest:
632
      msg = "Cannot pass options when removing parameter groups: %s" % value
633
      raise errors.ParameterError(msg)
634
    retval = (ident[len(NO_PREFIX):], False)
635
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
636
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
637
    if rest:
638
      msg = "Cannot pass options when removing parameter groups: %s" % value
639
      raise errors.ParameterError(msg)
640
    retval = (ident[len(UN_PREFIX):], None)
641
  else:
642
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
643
    retval = (ident, kv_dict)
644
  return retval
645

    
646

    
647
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
648
  """Custom parser for ident:key=val,key=val options.
649

650
  This will store the parsed values as a tuple (ident, {key: val}). As such,
651
  multiple uses of this option via action=append is possible.
652

653
  """
654
  return _SplitIdentKeyVal(opt, value, True)
655

    
656

    
657
def check_key_val(option, opt, value):  # pylint: disable=W0613
658
  """Custom parser class for key=val,key=val options.
659

660
  This will store the parsed values as a dict {key: val}.
661

662
  """
663
  return _SplitKeyVal(opt, value, True)
664

    
665

    
666
def _SplitListKeyVal(opt, value):
667
  retval = {}
668
  for elem in value.split("/"):
669
    if not elem:
670
      raise errors.ParameterError("Empty section in option '%s'" % opt)
671
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
672
    if ident in retval:
673
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
674
             (ident, opt, elem))
675
      raise errors.ParameterError(msg)
676
    retval[ident] = valdict
677
  return retval
678

    
679

    
680
def check_list_ident_key_val(_, opt, value):
681
  """Custom parser for "ident:key=val,key=val/ident:key=val" options.
682

683
  @rtype: list of dictionary
684
  @return: {ident: {key: val, key: val}, ident: {key: val}}
685

686
  """
687
  return _SplitListKeyVal(opt, value)
688

    
689

    
690
def check_bool(option, opt, value): # pylint: disable=W0613
691
  """Custom parser for yes/no options.
692

693
  This will store the parsed value as either True or False.
694

695
  """
696
  value = value.lower()
697
  if value == constants.VALUE_FALSE or value == "no":
698
    return False
699
  elif value == constants.VALUE_TRUE or value == "yes":
700
    return True
701
  else:
702
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
703

    
704

    
705
def check_list(option, opt, value): # pylint: disable=W0613
706
  """Custom parser for comma-separated lists.
707

708
  """
709
  # we have to make this explicit check since "".split(",") is [""],
710
  # not an empty list :(
711
  if not value:
712
    return []
713
  else:
714
    return utils.UnescapeAndSplit(value)
715

    
716

    
717
def check_maybefloat(option, opt, value): # pylint: disable=W0613
718
  """Custom parser for float numbers which might be also defaults.
719

720
  """
721
  value = value.lower()
722

    
723
  if value == constants.VALUE_DEFAULT:
724
    return value
725
  else:
726
    return float(value)
727

    
728

    
729
# completion_suggestion is normally a list. Using numeric values not evaluating
730
# to False for dynamic completion.
731
(OPT_COMPL_MANY_NODES,
732
 OPT_COMPL_ONE_NODE,
733
 OPT_COMPL_ONE_INSTANCE,
734
 OPT_COMPL_ONE_OS,
735
 OPT_COMPL_ONE_EXTSTORAGE,
736
 OPT_COMPL_ONE_IALLOCATOR,
737
 OPT_COMPL_ONE_NETWORK,
738
 OPT_COMPL_INST_ADD_NODES,
739
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
740

    
741
OPT_COMPL_ALL = compat.UniqueFrozenset([
742
  OPT_COMPL_MANY_NODES,
743
  OPT_COMPL_ONE_NODE,
744
  OPT_COMPL_ONE_INSTANCE,
745
  OPT_COMPL_ONE_OS,
746
  OPT_COMPL_ONE_EXTSTORAGE,
747
  OPT_COMPL_ONE_IALLOCATOR,
748
  OPT_COMPL_ONE_NETWORK,
749
  OPT_COMPL_INST_ADD_NODES,
750
  OPT_COMPL_ONE_NODEGROUP,
751
  ])
752

    
753

    
754
class CliOption(Option):
755
  """Custom option class for optparse.
756

757
  """
758
  ATTRS = Option.ATTRS + [
759
    "completion_suggest",
760
    ]
761
  TYPES = Option.TYPES + (
762
    "listidentkeyval",
763
    "identkeyval",
764
    "keyval",
765
    "unit",
766
    "bool",
767
    "list",
768
    "maybefloat",
769
    )
770
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
771
  TYPE_CHECKER["listidentkeyval"] = check_list_ident_key_val
772
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
773
  TYPE_CHECKER["keyval"] = check_key_val
774
  TYPE_CHECKER["unit"] = check_unit
775
  TYPE_CHECKER["bool"] = check_bool
776
  TYPE_CHECKER["list"] = check_list
777
  TYPE_CHECKER["maybefloat"] = check_maybefloat
778

    
779

    
780
# optparse.py sets make_option, so we do it for our own option class, too
781
cli_option = CliOption
782

    
783

    
784
_YORNO = "yes|no"
785

    
786
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
787
                       help="Increase debugging level")
788

    
789
NOHDR_OPT = cli_option("--no-headers", default=False,
790
                       action="store_true", dest="no_headers",
791
                       help="Don't display column headers")
792

    
793
SEP_OPT = cli_option("--separator", default=None,
794
                     action="store", dest="separator",
795
                     help=("Separator between output fields"
796
                           " (defaults to one space)"))
797

    
798
USEUNITS_OPT = cli_option("--units", default=None,
799
                          dest="units", choices=("h", "m", "g", "t"),
800
                          help="Specify units for output (one of h/m/g/t)")
801

    
802
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
803
                        type="string", metavar="FIELDS",
804
                        help="Comma separated list of output fields")
805

    
806
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
807
                       default=False, help="Force the operation")
808

    
809
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
810
                         default=False, help="Do not require confirmation")
811

    
812
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
813
                                  action="store_true", default=False,
814
                                  help=("Ignore offline nodes and do as much"
815
                                        " as possible"))
816

    
817
TAG_ADD_OPT = cli_option("--tags", dest="tags",
818
                         default=None, help="Comma-separated list of instance"
819
                                            " tags")
820

    
821
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
822
                         default=None, help="File with tag names")
823

    
824
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
825
                        default=False, action="store_true",
826
                        help=("Submit the job and return the job ID, but"
827
                              " don't wait for the job to finish"))
828

    
829
SYNC_OPT = cli_option("--sync", dest="do_locking",
830
                      default=False, action="store_true",
831
                      help=("Grab locks while doing the queries"
832
                            " in order to ensure more consistent results"))
833

    
834
DRY_RUN_OPT = cli_option("--dry-run", default=False,
835
                         action="store_true",
836
                         help=("Do not execute the operation, just run the"
837
                               " check steps and verify if it could be"
838
                               " executed"))
839

    
840
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
841
                         action="store_true",
842
                         help="Increase the verbosity of the operation")
843

    
844
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
845
                              action="store_true", dest="simulate_errors",
846
                              help="Debugging option that makes the operation"
847
                              " treat most runtime checks as failed")
848

    
849
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
850
                        default=True, action="store_false",
851
                        help="Don't wait for sync (DANGEROUS!)")
852

    
853
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
854
                        default=False, action="store_true",
855
                        help="Wait for disks to sync")
856

    
857
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
858
                             action="store_true", default=False,
859
                             help="Enable offline instance")
860

    
861
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
862
                              action="store_true", default=False,
863
                              help="Disable down instance")
864

    
865
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
866
                               help=("Custom disk setup (%s)" %
867
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
868
                               default=None, metavar="TEMPL",
869
                               choices=list(constants.DISK_TEMPLATES))
870

    
871
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
872
                        help="Do not create any network cards for"
873
                        " the instance")
874

    
875
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
876
                               help="Relative path under default cluster-wide"
877
                               " file storage dir to store file-based disks",
878
                               default=None, metavar="<DIR>")
879

    
880
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
881
                                  help="Driver to use for image files",
882
                                  default="loop", metavar="<DRIVER>",
883
                                  choices=list(constants.FILE_DRIVER))
884

    
885
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
886
                            help="Select nodes for the instance automatically"
887
                            " using the <NAME> iallocator plugin",
888
                            default=None, type="string",
889
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
890

    
891
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
892
                                    metavar="<NAME>",
893
                                    help="Set the default instance"
894
                                    " allocator plugin",
895
                                    default=None, type="string",
896
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
897

    
898
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
899
                    metavar="<os>",
900
                    completion_suggest=OPT_COMPL_ONE_OS)
901

    
902
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
903
                          type="keyval", default={},
904
                          help="OS parameters")
905

    
906
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
907
                               action="store_true", default=False,
908
                               help="Force an unknown variant")
909

    
910
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
911
                            action="store_true", default=False,
912
                            help="Do not install the OS (will"
913
                            " enable no-start)")
914

    
915
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
916
                                dest="allow_runtime_chgs",
917
                                default=True, action="store_false",
918
                                help="Don't allow runtime changes")
919

    
920
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
921
                         type="keyval", default={},
922
                         help="Backend parameters")
923

    
924
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
925
                        default={}, dest="hvparams",
926
                        help="Hypervisor parameters")
927

    
928
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
929
                             help="Disk template parameters, in the format"
930
                             " template:option=value,option=value,...",
931
                             type="identkeyval", action="append", default=[])
932

    
933
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
934
                                 type="keyval", default={},
935
                                 help="Memory size specs: list of key=value,"
936
                                " where key is one of min, max, std"
937
                                 " (in MB or using a unit)")
938

    
939
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
940
                                 type="keyval", default={},
941
                                 help="CPU count specs: list of key=value,"
942
                                 " where key is one of min, max, std")
943

    
944
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
945
                                  dest="ispecs_disk_count",
946
                                  type="keyval", default={},
947
                                  help="Disk count specs: list of key=value,"
948
                                  " where key is one of min, max, std")
949

    
950
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
951
                                 type="keyval", default={},
952
                                 help="Disk size specs: list of key=value,"
953
                                 " where key is one of min, max, std"
954
                                 " (in MB or using a unit)")
955

    
956
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
957
                                 type="keyval", default={},
958
                                 help="NIC count specs: list of key=value,"
959
                                 " where key is one of min, max, std")
960

    
961
IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
962
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
963
                                      dest="ipolicy_bounds_specs",
964
                                      type="listidentkeyval", default=None,
965
                                      help="Complete instance specs limits")
966

    
967
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
968
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
969
                                   dest="ipolicy_std_specs",
970
                                   type="keyval", default=None,
971
                                   help="Complte standard instance specs")
972

    
973
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
974
                                    dest="ipolicy_disk_templates",
975
                                    type="list", default=None,
976
                                    help="Comma-separated list of"
977
                                    " enabled disk templates")
978

    
979
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
980
                                 dest="ipolicy_vcpu_ratio",
981
                                 type="maybefloat", default=None,
982
                                 help="The maximum allowed vcpu-to-cpu ratio")
983

    
984
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
985
                                   dest="ipolicy_spindle_ratio",
986
                                   type="maybefloat", default=None,
987
                                   help=("The maximum allowed instances to"
988
                                         " spindle ratio"))
989

    
990
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
991
                            help="Hypervisor and hypervisor options, in the"
992
                            " format hypervisor:option=value,option=value,...",
993
                            default=None, type="identkeyval")
994

    
995
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
996
                        help="Hypervisor and hypervisor options, in the"
997
                        " format hypervisor:option=value,option=value,...",
998
                        default=[], action="append", type="identkeyval")
999

    
1000
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1001
                           action="store_false",
1002
                           help="Don't check that the instance's IP"
1003
                           " is alive")
1004

    
1005
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1006
                             default=True, action="store_false",
1007
                             help="Don't check that the instance's name"
1008
                             " is resolvable")
1009

    
1010
NET_OPT = cli_option("--net",
1011
                     help="NIC parameters", default=[],
1012
                     dest="nics", action="append", type="identkeyval")
1013

    
1014
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1015
                      dest="disks", action="append", type="identkeyval")
1016

    
1017
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1018
                         help="Comma-separated list of disks"
1019
                         " indices to act on (e.g. 0,2) (optional,"
1020
                         " defaults to all disks)")
1021

    
1022
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1023
                         help="Enforces a single-disk configuration using the"
1024
                         " given disk size, in MiB unless a suffix is used",
1025
                         default=None, type="unit", metavar="<size>")
1026

    
1027
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1028
                                dest="ignore_consistency",
1029
                                action="store_true", default=False,
1030
                                help="Ignore the consistency of the disks on"
1031
                                " the secondary")
1032

    
1033
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1034
                                dest="allow_failover",
1035
                                action="store_true", default=False,
1036
                                help="If migration is not possible fallback to"
1037
                                     " failover")
1038

    
1039
NONLIVE_OPT = cli_option("--non-live", dest="live",
1040
                         default=True, action="store_false",
1041
                         help="Do a non-live migration (this usually means"
1042
                         " freeze the instance, save the state, transfer and"
1043
                         " only then resume running on the secondary node)")
1044

    
1045
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1046
                                default=None,
1047
                                choices=list(constants.HT_MIGRATION_MODES),
1048
                                help="Override default migration mode (choose"
1049
                                " either live or non-live")
1050

    
1051
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1052
                                help="Target node and optional secondary node",
1053
                                metavar="<pnode>[:<snode>]",
1054
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
1055

    
1056
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1057
                           action="append", metavar="<node>",
1058
                           help="Use only this node (can be used multiple"
1059
                           " times, if not given defaults to all nodes)",
1060
                           completion_suggest=OPT_COMPL_ONE_NODE)
1061

    
1062
NODEGROUP_OPT_NAME = "--node-group"
1063
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1064
                           dest="nodegroup",
1065
                           help="Node group (name or uuid)",
1066
                           metavar="<nodegroup>",
1067
                           default=None, type="string",
1068
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1069

    
1070
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1071
                             metavar="<node>",
1072
                             completion_suggest=OPT_COMPL_ONE_NODE)
1073

    
1074
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1075
                         action="store_false",
1076
                         help="Don't start the instance after creation")
1077

    
1078
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1079
                         action="store_true", default=False,
1080
                         help="Show command instead of executing it")
1081

    
1082
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1083
                         default=False, action="store_true",
1084
                         help="Instead of performing the migration, try to"
1085
                         " recover from a failed cleanup. This is safe"
1086
                         " to run even if the instance is healthy, but it"
1087
                         " will create extra replication traffic and "
1088
                         " disrupt briefly the replication (like during the"
1089
                         " migration")
1090

    
1091
STATIC_OPT = cli_option("-s", "--static", dest="static",
1092
                        action="store_true", default=False,
1093
                        help="Only show configuration data, not runtime data")
1094

    
1095
ALL_OPT = cli_option("--all", dest="show_all",
1096
                     default=False, action="store_true",
1097
                     help="Show info on all instances on the cluster."
1098
                     " This can take a long time to run, use wisely")
1099

    
1100
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1101
                           action="store_true", default=False,
1102
                           help="Interactive OS reinstall, lists available"
1103
                           " OS templates for selection")
1104

    
1105
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1106
                                 action="store_true", default=False,
1107
                                 help="Remove the instance from the cluster"
1108
                                 " configuration even if there are failures"
1109
                                 " during the removal process")
1110

    
1111
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1112
                                        dest="ignore_remove_failures",
1113
                                        action="store_true", default=False,
1114
                                        help="Remove the instance from the"
1115
                                        " cluster configuration even if there"
1116
                                        " are failures during the removal"
1117
                                        " process")
1118

    
1119
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1120
                                 action="store_true", default=False,
1121
                                 help="Remove the instance from the cluster")
1122

    
1123
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1124
                               help="Specifies the new node for the instance",
1125
                               metavar="NODE", default=None,
1126
                               completion_suggest=OPT_COMPL_ONE_NODE)
1127

    
1128
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1129
                               help="Specifies the new secondary node",
1130
                               metavar="NODE", default=None,
1131
                               completion_suggest=OPT_COMPL_ONE_NODE)
1132

    
1133
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1134
                             help="Specifies the new primary node",
1135
                             metavar="<node>", default=None,
1136
                             completion_suggest=OPT_COMPL_ONE_NODE)
1137

    
1138
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1139
                            default=False, action="store_true",
1140
                            help="Replace the disk(s) on the primary"
1141
                                 " node (applies only to internally mirrored"
1142
                                 " disk templates, e.g. %s)" %
1143
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1144

    
1145
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1146
                              default=False, action="store_true",
1147
                              help="Replace the disk(s) on the secondary"
1148
                                   " node (applies only to internally mirrored"
1149
                                   " disk templates, e.g. %s)" %
1150
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1151

    
1152
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1153
                              default=False, action="store_true",
1154
                              help="Lock all nodes and auto-promote as needed"
1155
                              " to MC status")
1156

    
1157
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1158
                              default=False, action="store_true",
1159
                              help="Automatically replace faulty disks"
1160
                                   " (applies only to internally mirrored"
1161
                                   " disk templates, e.g. %s)" %
1162
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1163

    
1164
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1165
                             default=False, action="store_true",
1166
                             help="Ignore current recorded size"
1167
                             " (useful for forcing activation when"
1168
                             " the recorded size is wrong)")
1169

    
1170
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1171
                          metavar="<node>",
1172
                          completion_suggest=OPT_COMPL_ONE_NODE)
1173

    
1174
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1175
                         metavar="<dir>")
1176

    
1177
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1178
                              help="Specify the secondary ip for the node",
1179
                              metavar="ADDRESS", default=None)
1180

    
1181
READD_OPT = cli_option("--readd", dest="readd",
1182
                       default=False, action="store_true",
1183
                       help="Readd old node after replacing it")
1184

    
1185
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1186
                                default=True, action="store_false",
1187
                                help="Disable SSH key fingerprint checking")
1188

    
1189
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1190
                                 default=False, action="store_true",
1191
                                 help="Force the joining of a node")
1192

    
1193
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1194
                    type="bool", default=None, metavar=_YORNO,
1195
                    help="Set the master_candidate flag on the node")
1196

    
1197
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1198
                         type="bool", default=None,
1199
                         help=("Set the offline flag on the node"
1200
                               " (cluster does not communicate with offline"
1201
                               " nodes)"))
1202

    
1203
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1204
                         type="bool", default=None,
1205
                         help=("Set the drained flag on the node"
1206
                               " (excluded from allocation operations)"))
1207

    
1208
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1209
                              type="bool", default=None, metavar=_YORNO,
1210
                              help="Set the master_capable flag on the node")
1211

    
1212
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1213
                          type="bool", default=None, metavar=_YORNO,
1214
                          help="Set the vm_capable flag on the node")
1215

    
1216
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1217
                             type="bool", default=None, metavar=_YORNO,
1218
                             help="Set the allocatable flag on a volume")
1219

    
1220
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1221
                               help="Disable support for lvm based instances"
1222
                               " (cluster-wide)",
1223
                               action="store_false", default=True)
1224

    
1225
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1226
                            dest="enabled_hypervisors",
1227
                            help="Comma-separated list of hypervisors",
1228
                            type="string", default=None)
1229

    
1230
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1231
                                        dest="enabled_disk_templates",
1232
                                        help="Comma-separated list of "
1233
                                             "disk templates",
1234
                                        type="string", default=None)
1235

    
1236
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1237
                            type="keyval", default={},
1238
                            help="NIC parameters")
1239

    
1240
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1241
                         dest="candidate_pool_size", type="int",
1242
                         help="Set the candidate pool size")
1243

    
1244
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1245
                         help=("Enables LVM and specifies the volume group"
1246
                               " name (cluster-wide) for disk allocation"
1247
                               " [%s]" % constants.DEFAULT_VG),
1248
                         metavar="VG", default=None)
1249

    
1250
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1251
                          help="Destroy cluster", action="store_true")
1252

    
1253
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1254
                          help="Skip node agreement check (dangerous)",
1255
                          action="store_true", default=False)
1256

    
1257
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1258
                            help="Specify the mac prefix for the instance IP"
1259
                            " addresses, in the format XX:XX:XX",
1260
                            metavar="PREFIX",
1261
                            default=None)
1262

    
1263
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1264
                               help="Specify the node interface (cluster-wide)"
1265
                               " on which the master IP address will be added"
1266
                               " (cluster init default: %s)" %
1267
                               constants.DEFAULT_BRIDGE,
1268
                               metavar="NETDEV",
1269
                               default=None)
1270

    
1271
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1272
                                help="Specify the netmask of the master IP",
1273
                                metavar="NETMASK",
1274
                                default=None)
1275

    
1276
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1277
                                     dest="use_external_mip_script",
1278
                                     help="Specify whether to run a"
1279
                                     " user-provided script for the master"
1280
                                     " IP address turnup and"
1281
                                     " turndown operations",
1282
                                     type="bool", metavar=_YORNO, default=None)
1283

    
1284
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1285
                                help="Specify the default directory (cluster-"
1286
                                "wide) for storing the file-based disks [%s]" %
1287
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1288
                                metavar="DIR",
1289
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1290

    
1291
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1292
  "--shared-file-storage-dir",
1293
  dest="shared_file_storage_dir",
1294
  help="Specify the default directory (cluster-wide) for storing the"
1295
  " shared file-based disks [%s]" %
1296
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1297
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1298

    
1299
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1300
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1301
                                   action="store_false", default=True)
1302

    
1303
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1304
                                    help="Don't initialize SSH keys",
1305
                                    action="store_false", default=True)
1306

    
1307
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1308
                             help="Enable parseable error messages",
1309
                             action="store_true", default=False)
1310

    
1311
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1312
                          help="Skip N+1 memory redundancy tests",
1313
                          action="store_true", default=False)
1314

    
1315
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1316
                             help="Type of reboot: soft/hard/full",
1317
                             default=constants.INSTANCE_REBOOT_HARD,
1318
                             metavar="<REBOOT>",
1319
                             choices=list(constants.REBOOT_TYPES))
1320

    
1321
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1322
                                    dest="ignore_secondaries",
1323
                                    default=False, action="store_true",
1324
                                    help="Ignore errors from secondaries")
1325

    
1326
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1327
                            action="store_false", default=True,
1328
                            help="Don't shutdown the instance (unsafe)")
1329

    
1330
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1331
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1332
                         help="Maximum time to wait")
1333

    
1334
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1335
                                  dest="shutdown_timeout", type="int",
1336
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1337
                                  help="Maximum time to wait for instance"
1338
                                  " shutdown")
1339

    
1340
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1341
                          default=None,
1342
                          help=("Number of seconds between repetions of the"
1343
                                " command"))
1344

    
1345
EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]


class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error


class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  """


def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s   %s" % (mlen, "", line)

  yield ""


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
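
# Illustrative sketch (hypothetical descriptors; only the ``min`` and ``max``
# attributes read above matter to this function):
#
#   class _Arg(object):
#     def __init__(self, min, max):
#       self.min = min
#       self.max = max
#
#   _CheckArguments("power-on", [_Arg(min=1, max=None)], ["node1", "node2"])
#   # -> True (one or more values accepted)
#   _CheckArguments("power-on", [_Arg(min=1, max=None)], [])
#   # -> False, after printing "expects at least 1 argument(s)" to stderr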


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
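
# Illustrative behaviour (hypothetical node names):
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)
# Callers such as GenericInstanceCreate only unpack two elements, so the
# list/tuple asymmetry is harmless.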


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
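
# Illustrative behaviour (hypothetical OS name and variants):
#   CalculateOSNames("debootstrap", ["wheezy", "jessie"])
#     -> ["debootstrap+wheezy", "debootstrap+jessie"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]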


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
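
# Illustrative behaviour (hypothetical field names):
#   ParseFields(None, ["name", "status"]) -> ["name", "status"]
#   ParseFields("+oper_ram", ["name", "status"])
#     -> ["name", "status", "oper_ram"]
#   ParseFields("name,os", ["name", "status"]) -> ["name", "os"]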


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
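
# Illustrative usage (hypothetical prompt and option; with the default
# choices, "y" maps to True, "n" to False, and False is returned when no
# tty is available):
#   if not opts.force and not AskUser("Continue with the operation?"):
#     return 1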


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
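
# Illustrative wiring (this is what PollJob below does): pair a data callback
# wrapping a luxi client with a reporting callback, e.g.
#   GenericPollJob(job_id, _LuxiJobPollCb(cl), StdioJobPollReportCb())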


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
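
# Illustrative usage (cf. GenericInstanceCreate below; ``opts`` is the parsed
# options object and the opcode arguments are elided):
#   op = opcodes.OpInstanceCreate(...)
#   SubmitOrSend(op, opts)  # waits for the result, or raises
#                           # JobSubmittedException when --submit was given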


def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail.

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)


def GetClient(query=False):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
  if override_socket:
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
      address = pathutils.MASTER_SOCKET
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
      address = pathutils.QUERY_SOCKET
    else:
      address = override_socket
  elif query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    address = None
  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
  return client


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
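
# Illustrative usage (mirrors the handling in GenericMain below):
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     retcode, err_msg = FormatError(err)
#     ToStderr(err_msg)
#     sys.exit(retcode)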


def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
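
# Illustrative use from a client script (each entry follows the
# (function, args_def, options, usage, description) layout unpacked by
# _ParseArgs above; the command name and handler are hypothetical):
#   commands = {
#     "list": (ListCmd, [], [], "", "Lists the objects"),
#     }
#   sys.exit(GenericMain(commands))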


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
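
# Illustrative behaviour (hypothetical values; keys must be valid NIC
# parameters per constants.INIC_PARAMS_TYPES):
#   ParseNicOption([("0", {"mode": "bridged"}), ("2", {"link": "br0"})])
#     -> [{"mode": "bridged"}, {}, {"link": "br0"}]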


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0


class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management.

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


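# Illustrative sketch (not part of this module): RunWhileClusterStopped hands
# the internal helper object to the callback, so the callback can inspect
# cluster_name, master_node and online_nodes while every daemon is down.
# The callback below is purely hypothetical.
#
#   def _ReportNodes(helper, prefix):
#     for node_name in helper.online_nodes:
#       helper.feedback_fn("%s: %s" % (prefix, node_name))
#
#   RunWhileClusterStopped(ToStdout, _ReportNodes, "down")

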
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


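# Illustrative sketch (not part of this module): rendering a small table with
# GenerateTable. Field names, headers and data below are made up; "dfree" is
# declared both numeric (right-aligned) and a unit field, so it is formatted
# with FormatUnit.
#
#   headers = {"name": "Node", "dfree": "DFree"}
#   data = [["node1.example.com", 10240], ["node2.example.com", 2048]]
#   for line in GenerateTable(headers, ["name", "dfree"], None, data,
#                             numfields=["dfree"], unitfields=["dfree"]):
#     ToStdout(line)

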
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


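# Illustrative sketch (not part of this module): how a single query cell is
# turned into text. A cell arrives as a (status, value) pair; normal cells go
# through the column's formatting function, abnormal ones through
# FormatResultError (the exact placeholder strings come from
# constants.RSS_DESCRIPTION).
#
#   seen = []
#   fmt = _QueryColumnFormatter(str, seen.append, False)
#   fmt((constants.RS_NORMAL, 42))     # -> "42"
#   fmt((constants.RS_OFFLINE, None))  # -> the "offline" placeholder text
#   assert seen == [constants.RS_NORMAL, constants.RS_OFFLINE]

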
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


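# Illustrative sketch (not part of this module), modelled on how the various
# "gnt-* list" commands call GenericList; "opts", "args" and
# "selected_fields" are assumed to be provided by the surrounding client
# script.
#
#   return GenericList(constants.QR_NODE, selected_fields, args, opts.units,
#                      opts.separator, not opts.no_headers,
#                      verbose=opts.verbose, force_filter=opts.force_filter)

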
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


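# Illustrative sketch (not part of this module): FormatTable with explicit
# TableColumn descriptions. The data below is made up; the second column is
# right-aligned because align_right is True.
#
#   columns = [
#     TableColumn("Instance", str, False),
#     TableColumn("Memory", lambda mem: utils.FormatUnit(mem, "h"), True),
#     ]
#   rows = [["web1.example.com", 2048], ["db1.example.com", 8192]]
#   for line in FormatTable(rows, columns, True, None):
#     ToStdout(line)

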
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


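# Illustrative sketch (not part of this module): ParseTimespec normalizes the
# suffixed forms to seconds, while plain integers pass through unchanged.
#
#   assert ParseTimespec("30") == 30
#   assert ParseTimespec("2h") == 7200
#   assert ParseTimespec("1w") == 604800

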
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  any offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


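# Illustrative sketch (not part of this module): GetOnlineNodes needs a
# running master daemon; restricting the result to one node group and asking
# for secondary IPs looks roughly like this (the group name is made up).
#
#   cl = GetClient()
#   sips = GetOnlineNodes([], cl=cl, secondary_ips=True, filter_master=True,
#                         nodegroup="group1")

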
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


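# Illustrative sketch (not part of this module), modelled on how client
# scripts batch opcodes with JobExecutor: queue one job per instance, submit
# them all, then wait for the results. The instance names below are made up.
#
#   jex = JobExecutor(feedback_fn=ToStdout)
#   for instance in ["web1.example.com", "db1.example.com"]:
#     jex.QueueJob(instance, opcodes.OpInstanceStartup(instance_name=instance))
#   results = jex.GetResults()

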
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


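# Illustrative sketch (not part of this module): values set explicitly are
# shown as-is, inherited ones are marked as defaults (the data is made up).
#
#   FormatParamsDictInfo({"vcpus": 2}, {"vcpus": 2, "memory": 128})
#   # -> {"vcpus": "2", "memory": "default (128)"}

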
def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  ret = [
    (key,
     FormatParamsDictInfo(custom_minmax.get(key, {}),
                          eff_ipolicy[constants.ISPECS_MINMAX][key]))
    for key in constants.ISPECS_MINMAX_KEYS
    ]
  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("enabled disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.
3795

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, allowed_values):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  for key in constants.ISPECS_MINMAX_KEYS:
    ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
  ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret and ret[k] != constants.VALUE_DEFAULT:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, allowed_values):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES,
                      allowed_values=allowed_values)
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  if minmax_ispecs is not None:
    minmax_out = {}
    for (key, spec) in minmax_ispecs.items():
      if key not in constants.ISPECS_MINMAX_KEYS:
        msg = "Invalid key in bounds instance specifications: %s" % key
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      minmax_out[key] = _ParseISpec(spec, key, allowed_values)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std",
                                                    allowed_values)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  if ((ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
       ispecs_disk_size or ispecs_nic_count) and
      (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if minmax_ispecs is None and std_ispecs is None:
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, allowed_values)
  else:
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
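

# Illustrative sketch (not part of this module): PrintGenericInfo renders
# nested structures as YAML-style indented text; lists of (key, value) pairs
# keep their order. The data below is made up.
#
#   PrintGenericInfo([
#     ("Cluster name", "cluster.example.com"),
#     ("Default hypervisor", "kvm"),
#     ("Enabled hypervisors", ["kvm", "xen-pvm"]),
#     ])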