
# lib/cli.py @ 1d4a4b26


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
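
# Illustrative note (not part of the original module): the lists above are
# per-command argument specifications. For example, a command taking exactly
# one instance name followed by any number of node names could declare
#
#   [ArgInstance(min=1, max=1), ArgNode()]
#
# where ArgNode() keeps the _Argument defaults min=0, max=None, i.e. zero or
# more trailing values are accepted.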


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
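
# Illustrative example (not part of the original module): with
# opts.tag_type == constants.TAG_NODE and args == ["node1.example.com",
# "mytag"], _ExtractTagsObject pops the object name and returns
# (constants.TAG_NODE, "node1.example.com"), leaving ["mytag"] in args;
# for constants.TAG_CLUSTER no name is consumed and (kind, None) is returned.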


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
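
# Illustrative example (not part of the original module), assuming
# utils.ParseUnit() keeps its usual Ganeti semantics of returning the size
# in mebibytes:
#
#   check_unit(None, "--os-size", "512")  # => 512
#   check_unit(None, "--os-size", "4g")   # => 4096
#   check_unit(None, "--os-size", "4x")   # raises OptionValueError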


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values are handled specially: keys with the prefix 'no_' get the value
  False and the prefix stripped, keys with the prefix "-" get the value
  None and the prefix stripped, and all other valueless keys get the
  value True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
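
# Illustrative example (not part of the original module): with prefix
# parsing enabled, a value such as "mem=512,no_auto_balance,-vcpus" is
# parsed into {"mem": "512", "auto_balance": False, "vcpus": None}; with
# parse_prefixes=False the same bare keys would instead raise
# errors.ParameterError because they lack an explicit value.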


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
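
# Illustrative example (not part of the original module): a value such as
#
#   "min:memory-size=128/max:memory-size=4096//min:cpu-count=1"
#
# is split on "//" into two sections and parsed into
#
#   [{"min": {"memory-size": "128"}, "max": {"memory-size": "4096"}},
#    {"min": {"cpu-count": "1"}}]
#
# Note that _SplitListKeyVal calls _SplitIdentKeyVal with
# parse_prefixes=False, so every key inside a section must carry an
# explicit "=value" part.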


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used here to request dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
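
# Illustrative sketch (not part of the original module) of how the custom
# option class is meant to be used: options built with cli_option() are
# passed to a standard optparse.OptionParser, and the TYPE_CHECKER entries
# above convert the raw command-line strings while parsing.
#
#   _example_parser = OptionParser(option_list=[
#     cli_option("-B", "--backend-parameters", dest="beparams",
#                type="keyval", default={}),
#     ])
#   _opts, _args = _example_parser.parse_args(["-B", "memory=512,auto_balance"])
#   # _opts.beparams == {"memory": "512", "auto_balance": True}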


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")
972

    
973
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
974
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
975
                                   dest="ipolicy_std_specs",
976
                                   type="keyval", default=None,
977
                                   help="Complte standard instance specs")
978

    
979
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
980
                                    dest="ipolicy_disk_templates",
981
                                    type="list", default=None,
982
                                    help="Comma-separated list of"
983
                                    " enabled disk templates")
984

    
985
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
986
                                 dest="ipolicy_vcpu_ratio",
987
                                 type="maybefloat", default=None,
988
                                 help="The maximum allowed vcpu-to-cpu ratio")
989

    
990
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
991
                                   dest="ipolicy_spindle_ratio",
992
                                   type="maybefloat", default=None,
993
                                   help=("The maximum allowed instances to"
994
                                         " spindle ratio"))
995

    
996
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
997
                            help="Hypervisor and hypervisor options, in the"
998
                            " format hypervisor:option=value,option=value,...",
999
                            default=None, type="identkeyval")
1000

    
1001
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1002
                        help="Hypervisor and hypervisor options, in the"
1003
                        " format hypervisor:option=value,option=value,...",
1004
                        default=[], action="append", type="identkeyval")
1005

    
1006
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1007
                           action="store_false",
1008
                           help="Don't check that the instance's IP"
1009
                           " is alive")
1010

    
1011
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1012
                             default=True, action="store_false",
1013
                             help="Don't check that the instance's name"
1014
                             " is resolvable")
1015

    
1016
NET_OPT = cli_option("--net",
1017
                     help="NIC parameters", default=[],
1018
                     dest="nics", action="append", type="identkeyval")
1019

    
1020
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1021
                      dest="disks", action="append", type="identkeyval")
1022

    
1023
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1024
                         help="Comma-separated list of disks"
1025
                         " indices to act on (e.g. 0,2) (optional,"
1026
                         " defaults to all disks)")
1027

    
1028
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1029
                         help="Enforces a single-disk configuration using the"
1030
                         " given disk size, in MiB unless a suffix is used",
1031
                         default=None, type="unit", metavar="<size>")
1032

    
1033
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1034
                                dest="ignore_consistency",
1035
                                action="store_true", default=False,
1036
                                help="Ignore the consistency of the disks on"
1037
                                " the secondary")
1038

    
1039
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1040
                                dest="allow_failover",
1041
                                action="store_true", default=False,
1042
                                help="If migration is not possible fallback to"
1043
                                     " failover")
1044

    
1045
NONLIVE_OPT = cli_option("--non-live", dest="live",
1046
                         default=True, action="store_false",
1047
                         help="Do a non-live migration (this usually means"
1048
                         " freeze the instance, save the state, transfer and"
1049
                         " only then resume running on the secondary node)")
1050

    
1051
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1052
                                default=None,
1053
                                choices=list(constants.HT_MIGRATION_MODES),
1054
                                help="Override default migration mode (choose"
1055
                                " either live or non-live")
1056

    
1057
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1058
                                help="Target node and optional secondary node",
1059
                                metavar="<pnode>[:<snode>]",
1060
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
1061

    
1062
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1063
                           action="append", metavar="<node>",
1064
                           help="Use only this node (can be used multiple"
1065
                           " times, if not given defaults to all nodes)",
1066
                           completion_suggest=OPT_COMPL_ONE_NODE)
1067

    
1068
NODEGROUP_OPT_NAME = "--node-group"
1069
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1070
                           dest="nodegroup",
1071
                           help="Node group (name or uuid)",
1072
                           metavar="<nodegroup>",
1073
                           default=None, type="string",
1074
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1075

    
1076
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1077
                             metavar="<node>",
1078
                             completion_suggest=OPT_COMPL_ONE_NODE)
1079

    
1080
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1081
                         action="store_false",
1082
                         help="Don't start the instance after creation")
1083

    
1084
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1085
                         action="store_true", default=False,
1086
                         help="Show command instead of executing it")
1087

    
1088
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1089
                         default=False, action="store_true",
1090
                         help="Instead of performing the migration, try to"
1091
                         " recover from a failed cleanup. This is safe"
1092
                         " to run even if the instance is healthy, but it"
1093
                         " will create extra replication traffic and "
1094
                         " disrupt briefly the replication (like during the"
1095
                         " migration")
1096

    
1097
STATIC_OPT = cli_option("-s", "--static", dest="static",
1098
                        action="store_true", default=False,
1099
                        help="Only show configuration data, not runtime data")
1100

    
1101
ALL_OPT = cli_option("--all", dest="show_all",
1102
                     default=False, action="store_true",
1103
                     help="Show info on all instances on the cluster."
1104
                     " This can take a long time to run, use wisely")
1105

    
1106
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1107
                           action="store_true", default=False,
1108
                           help="Interactive OS reinstall, lists available"
1109
                           " OS templates for selection")
1110

    
1111
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1112
                                 action="store_true", default=False,
1113
                                 help="Remove the instance from the cluster"
1114
                                 " configuration even if there are failures"
1115
                                 " during the removal process")
1116

    
1117
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1118
                                        dest="ignore_remove_failures",
1119
                                        action="store_true", default=False,
1120
                                        help="Remove the instance from the"
1121
                                        " cluster configuration even if there"
1122
                                        " are failures during the removal"
1123
                                        " process")
1124

    
1125
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1126
                                 action="store_true", default=False,
1127
                                 help="Remove the instance from the cluster")
1128

    
1129
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1130
                               help="Specifies the new node for the instance",
1131
                               metavar="NODE", default=None,
1132
                               completion_suggest=OPT_COMPL_ONE_NODE)
1133

    
1134
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1135
                               help="Specifies the new secondary node",
1136
                               metavar="NODE", default=None,
1137
                               completion_suggest=OPT_COMPL_ONE_NODE)
1138

    
1139
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1140
                             help="Specifies the new primary node",
1141
                             metavar="<node>", default=None,
1142
                             completion_suggest=OPT_COMPL_ONE_NODE)
1143

    
1144
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1145
                            default=False, action="store_true",
1146
                            help="Replace the disk(s) on the primary"
1147
                                 " node (applies only to internally mirrored"
1148
                                 " disk templates, e.g. %s)" %
1149
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1150

    
1151
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1152
                              default=False, action="store_true",
1153
                              help="Replace the disk(s) on the secondary"
1154
                                   " node (applies only to internally mirrored"
1155
                                   " disk templates, e.g. %s)" %
1156
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1157

    
1158
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1159
                              default=False, action="store_true",
1160
                              help="Lock all nodes and auto-promote as needed"
1161
                              " to MC status")
1162

    
1163
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1164
                              default=False, action="store_true",
1165
                              help="Automatically replace faulty disks"
1166
                                   " (applies only to internally mirrored"
1167
                                   " disk templates, e.g. %s)" %
1168
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1169

    
1170
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1171
                             default=False, action="store_true",
1172
                             help="Ignore current recorded size"
1173
                             " (useful for forcing activation when"
1174
                             " the recorded size is wrong)")
1175

    
1176
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1177
                          metavar="<node>",
1178
                          completion_suggest=OPT_COMPL_ONE_NODE)
1179

    
1180
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1181
                         metavar="<dir>")
1182

    
1183
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1184
                              help="Specify the secondary ip for the node",
1185
                              metavar="ADDRESS", default=None)
1186

    
1187
READD_OPT = cli_option("--readd", dest="readd",
1188
                       default=False, action="store_true",
1189
                       help="Readd old node after replacing it")
1190

    
1191
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1192
                                default=True, action="store_false",
1193
                                help="Disable SSH key fingerprint checking")
1194

    
1195
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1196
                                 default=False, action="store_true",
1197
                                 help="Force the joining of a node")
1198

    
1199
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1200
                    type="bool", default=None, metavar=_YORNO,
1201
                    help="Set the master_candidate flag on the node")
1202

    
1203
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1204
                         type="bool", default=None,
1205
                         help=("Set the offline flag on the node"
1206
                               " (cluster does not communicate with offline"
1207
                               " nodes)"))
1208

    
1209
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1210
                         type="bool", default=None,
1211
                         help=("Set the drained flag on the node"
1212
                               " (excluded from allocation operations)"))
1213

    
1214
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1215
                              type="bool", default=None, metavar=_YORNO,
1216
                              help="Set the master_capable flag on the node")
1217

    
1218
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1219
                          type="bool", default=None, metavar=_YORNO,
1220
                          help="Set the vm_capable flag on the node")
1221

    
1222
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1223
                             type="bool", default=None, metavar=_YORNO,
1224
                             help="Set the allocatable flag on a volume")
1225

    
1226
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1227
                               help="Disable support for lvm based instances"
1228
                               " (cluster-wide)",
1229
                               action="store_false", default=True)
1230

    
1231
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1232
                            dest="enabled_hypervisors",
1233
                            help="Comma-separated list of hypervisors",
1234
                            type="string", default=None)
1235

    
1236
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1237
                                        dest="enabled_disk_templates",
1238
                                        help="Comma-separated list of "
1239
                                             "disk templates",
1240
                                        type="string", default=None)
1241

    
1242
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1243
                            type="keyval", default={},
1244
                            help="NIC parameters")
1245

    
1246
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1247
                         dest="candidate_pool_size", type="int",
1248
                         help="Set the candidate pool size")
1249

    
1250
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1251
                         help=("Enables LVM and specifies the volume group"
1252
                               " name (cluster-wide) for disk allocation"
1253
                               " [%s]" % constants.DEFAULT_VG),
1254
                         metavar="VG", default=None)
1255

    
1256
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1257
                          help="Destroy cluster", action="store_true")
1258

    
1259
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1260
                          help="Skip node agreement check (dangerous)",
1261
                          action="store_true", default=False)
1262

    
1263
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1264
                            help="Specify the mac prefix for the instance IP"
1265
                            " addresses, in the format XX:XX:XX",
1266
                            metavar="PREFIX",
1267
                            default=None)
1268

    
1269
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1270
                               help="Specify the node interface (cluster-wide)"
1271
                               " on which the master IP address will be added"
1272
                               " (cluster init default: %s)" %
1273
                               constants.DEFAULT_BRIDGE,
1274
                               metavar="NETDEV",
1275
                               default=None)
1276

    
1277
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1278
                                help="Specify the netmask of the master IP",
1279
                                metavar="NETMASK",
1280
                                default=None)
1281

    
1282
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1283
                                     dest="use_external_mip_script",
1284
                                     help="Specify whether to run a"
1285
                                     " user-provided script for the master"
1286
                                     " IP address turnup and"
1287
                                     " turndown operations",
1288
                                     type="bool", metavar=_YORNO, default=None)
1289

    
1290
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1291
                                help="Specify the default directory (cluster-"
1292
                                "wide) for storing the file-based disks [%s]" %
1293
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1294
                                metavar="DIR",
1295
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1296

    
1297
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1298
  "--shared-file-storage-dir",
1299
  dest="shared_file_storage_dir",
1300
  help="Specify the default directory (cluster-wide) for storing the"
1301
  " shared file-based disks [%s]" %
1302
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1303
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1304

    
1305
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1306
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1307
                                   action="store_false", default=True)
1308

    
1309
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1310
                                    help="Don't initialize SSH keys",
1311
                                    action="store_false", default=True)
1312

    
1313
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1314
                             help="Enable parseable error messages",
1315
                             action="store_true", default=False)
1316

    
1317
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1318
                          help="Skip N+1 memory redundancy tests",
1319
                          action="store_true", default=False)
1320

    
1321
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1322
                             help="Type of reboot: soft/hard/full",
1323
                             default=constants.INSTANCE_REBOOT_HARD,
1324
                             metavar="<REBOOT>",
1325
                             choices=list(constants.REBOOT_TYPES))
1326

    
1327
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1328
                                    dest="ignore_secondaries",
1329
                                    default=False, action="store_true",
1330
                                    help="Ignore errors from secondaries")
1331

    
1332
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1333
                            action="store_false", default=True,
1334
                            help="Don't shutdown the instance (unsafe)")
1335

    
1336
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1337
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1338
                         help="Maximum time to wait")
1339

    
1340
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1341
                                  dest="shutdown_timeout", type="int",
1342
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1343
                                  help="Maximum time to wait for instance"
1344
                                  " shutdown")
1345

    
1346
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1347
                          default=None,
1348
                          help=("Number of seconds between repetions of the"
1349
                                " command"))
1350

    
1351
EARLY_RELEASE_OPT = cli_option("--early-release",
1352
                               dest="early_release", default=False,
1353
                               action="store_true",
1354
                               help="Release the locks on the secondary"
1355
                               " node(s) early")
1356

    
1357
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1358
                                  dest="new_cluster_cert",
1359
                                  default=False, action="store_true",
1360
                                  help="Generate a new cluster certificate")
1361

    
1362
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1363
                           default=None,
1364
                           help="File containing new RAPI certificate")
1365

    
1366
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1367
                               default=None, action="store_true",
1368
                               help=("Generate a new self-signed RAPI"
1369
                                     " certificate"))
1370

    
1371
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1372
                            default=None,
1373
                            help="File containing new SPICE certificate")
1374

    
1375
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1376
                              default=None,
1377
                              help="File containing the certificate of the CA"
1378
                              " which signed the SPICE certificate")
1379

    
1380
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1381
                                dest="new_spice_cert", default=None,
1382
                                action="store_true",
1383
                                help=("Generate a new self-signed SPICE"
1384
                                      " certificate"))
1385

    
1386
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1387
                                    dest="new_confd_hmac_key",
1388
                                    default=False, action="store_true",
1389
                                    help=("Create a new HMAC key for %s" %
1390
                                          constants.CONFD))
1391

    
1392
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1393
                                       dest="cluster_domain_secret",
1394
                                       default=None,
1395
                                       help=("Load new new cluster domain"
1396
                                             " secret from file"))
1397

    
1398
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1399
                                           dest="new_cluster_domain_secret",
1400
                                           default=False, action="store_true",
1401
                                           help=("Create a new cluster domain"
1402
                                                 " secret"))
1403

    
1404
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1405
                              dest="use_replication_network",
1406
                              help="Whether to use the replication network"
1407
                              " for talking to the nodes",
1408
                              action="store_true", default=False)
1409

    
1410
MAINTAIN_NODE_HEALTH_OPT = \
1411
    cli_option("--maintain-node-health", dest="maintain_node_health",
1412
               metavar=_YORNO, default=None, type="bool",
1413
               help="Configure the cluster to automatically maintain node"
1414
               " health, by shutting down unknown instances, shutting down"
1415
               " unknown DRBD devices, etc.")
1416

    
1417
IDENTIFY_DEFAULTS_OPT = \
1418
    cli_option("--identify-defaults", dest="identify_defaults",
1419
               default=False, action="store_true",
1420
               help="Identify which saved instance parameters are equal to"
1421
               " the current cluster defaults and set them as such, instead"
1422
               " of marking them as overridden")
1423

    
1424
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1425
                         action="store", dest="uid_pool",
1426
                         help=("A list of user-ids or user-id"
1427
                               " ranges separated by commas"))
1428

    
1429
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1430
                          action="store", dest="add_uids",
1431
                          help=("A list of user-ids or user-id"
1432
                                " ranges separated by commas, to be"
1433
                                " added to the user-id pool"))
1434

    
1435
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1436
                             action="store", dest="remove_uids",
1437
                             help=("A list of user-ids or user-id"
1438
                                   " ranges separated by commas, to be"
1439
                                   " removed from the user-id pool"))
1440

    
1441
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1442
                              action="store", dest="reserved_lvs",
1443
                              help=("A comma-separated list of reserved"
1444
                                    " logical volumes names, that will be"
1445
                                    " ignored by cluster verify"))
1446

    
1447
ROMAN_OPT = cli_option("--roman",
1448
                       dest="roman_integers", default=False,
1449
                       action="store_true",
1450
                       help="Use roman numbers for positive integers")
1451

    
1452
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1453
                             action="store", default=None,
1454
                             help="Specifies usermode helper for DRBD")
1455

    
1456
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1457
                                action="store_false", default=True,
1458
                                help="Disable support for DRBD")
1459

    
1460
PRIMARY_IP_VERSION_OPT = \
1461
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1462
               action="store", dest="primary_ip_version",
1463
               metavar="%d|%d" % (constants.IP4_VERSION,
1464
                                  constants.IP6_VERSION),
1465
               help="Cluster-wide IP version for primary IP")
1466

    
1467
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1468
                              action="store_true",
1469
                              help="Show machine name for every line in output")
1470

    
1471
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1472
                              action="store_true",
1473
                              help=("Hide successful results and show failures"
1474
                                    " only (determined by the exit code)"))
1475

    
1476
REASON_OPT = cli_option("--reason", default=None,
1477
                        help="The reason for executing the command")
1478

    
1479

    
1480
def _PriorityOptionCb(option, _, value, parser):
1481
  """Callback for processing C{--priority} option.
1482

1483
  """
1484
  value = _PRIONAME_TO_VALUE[value]
1485

    
1486
  setattr(parser.values, option.dest, value)
1487

    
1488

    
1489
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1490
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1491
                          choices=_PRIONAME_TO_VALUE.keys(),
1492
                          action="callback", type="choice",
1493
                          callback=_PriorityOptionCb,
1494
                          help="Priority for opcode processing")
1495

    
1496
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1497
                        type="bool", default=None, metavar=_YORNO,
1498
                        help="Sets the hidden flag on the OS")
1499

    
1500
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1501
                        type="bool", default=None, metavar=_YORNO,
1502
                        help="Sets the blacklisted flag on the OS")
1503

    
1504
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1505
                                     type="bool", metavar=_YORNO,
1506
                                     dest="prealloc_wipe_disks",
1507
                                     help=("Wipe disks prior to instance"
1508
                                           " creation"))
1509

    
1510
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1511
                             type="keyval", default=None,
1512
                             help="Node parameters")
1513

    
1514
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1515
                              action="store", metavar="POLICY", default=None,
1516
                              help="Allocation policy for the node group")
1517

    
1518
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1519
                              type="bool", metavar=_YORNO,
1520
                              dest="node_powered",
1521
                              help="Specify if the SoR for node is powered")
1522

    
1523
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1524
                             default=constants.OOB_TIMEOUT,
1525
                             help="Maximum time to wait for out-of-band helper")
1526

    
1527
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1528
                             default=constants.OOB_POWER_DELAY,
1529
                             help="Time in seconds to wait between power-ons")
1530

    
1531
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1532
                              action="store_true", default=False,
1533
                              help=("Whether command argument should be treated"
1534
                                    " as filter"))
1535

    
1536
NO_REMEMBER_OPT = cli_option("--no-remember",
1537
                             dest="no_remember",
1538
                             action="store_true", default=False,
1539
                             help="Perform but do not record the change"
1540
                             " in the configuration")
1541

    
1542
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1543
                              default=False, action="store_true",
1544
                              help="Evacuate primary instances only")
1545

    
1546
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1547
                                default=False, action="store_true",
1548
                                help="Evacuate secondary instances only"
1549
                                     " (applies only to internally mirrored"
1550
                                     " disk templates, e.g. %s)" %
1551
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1552

    
1553
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1554
                                action="store_true", default=False,
1555
                                help="Pause instance at startup")
1556

    
1557
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1558
                          help="Destination node group (name or uuid)",
1559
                          default=None, action="append",
1560
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1561

    
1562
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1563
                               action="append", dest="ignore_errors",
1564
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1565
                               help="Error code to be ignored")
1566

    
1567
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1568
                            action="append",
1569
                            help=("Specify disk state information in the"
1570
                                  " format"
1571
                                  " storage_type/identifier:option=value,...;"
1572
                                  " note this is unused for now"),
1573
                            type="identkeyval")
1574

    
1575
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1576
                          action="append",
1577
                          help=("Specify hypervisor state information in the"
1578
                                " format hypervisor:option=value,...;"
1579
                                " note this is unused for now"),
1580
                          type="identkeyval")
1581

    
1582
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1583
                                action="store_true", default=False,
1584
                                help="Ignore instance policy violations")
1585

    
1586
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1587
                             help="Sets the instance's runtime memory,"
1588
                             " ballooning it up or down to the new value",
1589
                             default=None, type="unit", metavar="<size>")
1590

    
1591
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1592
                          action="store_true", default=False,
1593
                          help="Marks the grow as absolute instead of the"
1594
                          " (default) relative mode")
1595

    
1596
NETWORK_OPT = cli_option("--network",
1597
                         action="store", default=None, dest="network",
1598
                         help="IP network in CIDR notation")
1599

    
1600
GATEWAY_OPT = cli_option("--gateway",
1601
                         action="store", default=None, dest="gateway",
1602
                         help="IP address of the router (gateway)")
1603

    
1604
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1605
                                  action="store", default=None,
1606
                                  dest="add_reserved_ips",
1607
                                  help="Comma-separated list of"
1608
                                  " reserved IPs to add")
1609

    
1610
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1611
                                     action="store", default=None,
1612
                                     dest="remove_reserved_ips",
1613
                                     help="Comma-delimited list of"
1614
                                     " reserved IPs to remove")
1615

    
1616
NETWORK6_OPT = cli_option("--network6",
1617
                          action="store", default=None, dest="network6",
1618
                          help="IP network in CIDR notation")
1619

    
1620
GATEWAY6_OPT = cli_option("--gateway6",
1621
                          action="store", default=None, dest="gateway6",
1622
                          help="IP6 address of the router (gateway)")
1623

    
1624
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1625
                                  dest="conflicts_check",
1626
                                  default=True,
1627
                                  action="store_false",
1628
                                  help="Don't check for conflicting IPs")
1629

    
1630
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1631
                                 default=False, action="store_true",
1632
                                 help="Include default values")
1633

    
1634
#: Options provided by all commands
1635
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1636

    
1637
# common options for creating instances. add and import then add their own
1638
# specific ones.
1639
COMMON_CREATE_OPTS = [
1640
  BACKEND_OPT,
1641
  DISK_OPT,
1642
  DISK_TEMPLATE_OPT,
1643
  FILESTORE_DIR_OPT,
1644
  FILESTORE_DRIVER_OPT,
1645
  HYPERVISOR_OPT,
1646
  IALLOCATOR_OPT,
1647
  NET_OPT,
1648
  NODE_PLACEMENT_OPT,
1649
  NOIPCHECK_OPT,
1650
  NOCONFLICTSCHECK_OPT,
1651
  NONAMECHECK_OPT,
1652
  NONICS_OPT,
1653
  NWSYNC_OPT,
1654
  OSPARAMS_OPT,
1655
  OS_SIZE_OPT,
1656
  SUBMIT_OPT,
1657
  TAG_ADD_OPT,
1658
  DRY_RUN_OPT,
1659
  PRIORITY_OPT,
1660
  ]
1661

    
1662
# common instance policy options
1663
INSTANCE_POLICY_OPTS = [
1664
  IPOLICY_BOUNDS_SPECS_OPT,
1665
  IPOLICY_DISK_TEMPLATES,
1666
  IPOLICY_VCPU_RATIO,
1667
  IPOLICY_SPINDLE_RATIO,
1668
  ]
1669

    
1670
# instance policy split specs options
1671
SPLIT_ISPECS_OPTS = [
1672
  SPECS_CPU_COUNT_OPT,
1673
  SPECS_DISK_COUNT_OPT,
1674
  SPECS_DISK_SIZE_OPT,
1675
  SPECS_MEM_SIZE_OPT,
1676
  SPECS_NIC_COUNT_OPT,
1677
  ]
1678

    
1679

    
1680
class _ShowUsage(Exception):
1681
  """Exception class for L{_ParseArgs}.
1682

1683
  """
1684
  def __init__(self, exit_error):
1685
    """Initializes instances of this class.
1686

1687
    @type exit_error: bool
1688
    @param exit_error: Whether to report failure on exit
1689

1690
    """
1691
    Exception.__init__(self)
1692
    self.exit_error = exit_error
1693

    
1694

    
1695
class _ShowVersion(Exception):
1696
  """Exception class for L{_ParseArgs}.
1697

1698
  """
1699

    
1700

    
1701
def _ParseArgs(binary, argv, commands, aliases, env_override):
1702
  """Parser for the command line arguments.
1703

1704
  This function parses the arguments and returns the function which
1705
  must be executed together with its (modified) arguments.
1706

1707
  @param binary: Script name
1708
  @param argv: Command line arguments
1709
  @param commands: Dictionary containing command definitions
1710
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1711
  @param env_override: list of env variables allowed for default args
1712
  @raise _ShowUsage: If usage description should be shown
1713
  @raise _ShowVersion: If version should be shown
1714

1715
  """
1716
  assert not (env_override - set(commands))
1717
  assert not (set(aliases.keys()) & set(commands.keys()))
1718

    
1719
  if len(argv) > 1:
1720
    cmd = argv[1]
1721
  else:
1722
    # No option or command given
1723
    raise _ShowUsage(exit_error=True)
1724

    
1725
  if cmd == "--version":
1726
    raise _ShowVersion()
1727
  elif cmd == "--help":
1728
    raise _ShowUsage(exit_error=False)
1729
  elif not (cmd in commands or cmd in aliases):
1730
    raise _ShowUsage(exit_error=True)
1731

    
1732
  # get command, unalias it, and look it up in commands
1733
  if cmd in aliases:
1734
    if aliases[cmd] not in commands:
1735
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1736
                                   " command '%s'" % (cmd, aliases[cmd]))
1737

    
1738
    cmd = aliases[cmd]
1739

    
1740
  if cmd in env_override:
1741
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1742
    env_args = os.environ.get(args_env_name)
1743
    if env_args:
1744
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1745

    
1746
  func, args_def, parser_opts, usage, description = commands[cmd]
1747
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1748
                        description=description,
1749
                        formatter=TitledHelpFormatter(),
1750
                        usage="%%prog %s %s" % (cmd, usage))
1751
  parser.disable_interspersed_args()
1752
  options, args = parser.parse_args(args=argv[2:])
1753

    
1754
  if not _CheckArguments(cmd, args_def, args):
1755
    return None, None, None
1756

    
1757
  return func, options, args
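
# Illustrative sketch (not part of this module): each entry in the "commands"
# dictionary consumed by _ParseArgs (and by GenericMain further below) is a
# 5-tuple of (handler function, argument definitions, extra options, usage
# string, description).  "RemoveInstance" is a hypothetical client handler;
# ArgInstance is assumed to be one of the Arg* argument classes defined
# earlier in this module, while FORCE_OPT and SUBMIT_OPT are options from
# this module:
#
#   commands = {
#     "remove": (RemoveInstance,
#                [ArgInstance(min=1, max=1)],
#                [FORCE_OPT, SUBMIT_OPT],
#                "[-f] <instance>",
#                "Remove an instance"),
#     }
#   aliases = {"del": "remove"}
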


def _FormatUsage(binary, commands):
1761
  """Generates a nice description of all commands.
1762

1763
  @param binary: Script name
1764
  @param commands: Dictionary containing command definitions
1765

1766
  """
1767
  # compute the maximum length of the command names
1768
  mlen = min(60, max(map(len, commands)))
1769

    
1770
  yield "Usage: %s {command} [options...] [argument...]" % binary
1771
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1772
  yield ""
1773
  yield "Commands:"
1774

    
1775
  # and format a nice command list
1776
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1777
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1778
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1779
    for line in help_lines:
1780
      yield " %-*s   %s" % (mlen, "", line)
1781

    
1782
  yield ""
1783

    
1784

    
1785
def _CheckArguments(cmd, args_def, args):
1786
  """Verifies the arguments using the argument definition.
1787

1788
  Algorithm:
1789

1790
    1. Abort with error if values specified by user but none expected.
1791

1792
    1. For each argument in definition
1793

1794
      1. Keep running count of minimum number of values (min_count)
1795
      1. Keep running count of maximum number of values (max_count)
1796
      1. If it has an unlimited number of values
1797

1798
        1. Abort with error if it's not the last argument in the definition
1799

1800
    1. If last argument has limited number of values
1801

1802
      1. Abort with error if number of values doesn't match or is too large
1803

1804
    1. Abort with error if user didn't pass enough values (min_count)
1805

1806
  """
1807
  if args and not args_def:
1808
    ToStderr("Error: Command %s expects no arguments", cmd)
1809
    return False
1810

    
1811
  min_count = None
1812
  max_count = None
1813
  check_max = None
1814

    
1815
  last_idx = len(args_def) - 1
1816

    
1817
  for idx, arg in enumerate(args_def):
1818
    if min_count is None:
1819
      min_count = arg.min
1820
    elif arg.min is not None:
1821
      min_count += arg.min
1822

    
1823
    if max_count is None:
1824
      max_count = arg.max
1825
    elif arg.max is not None:
1826
      max_count += arg.max
1827

    
1828
    if idx == last_idx:
1829
      check_max = (arg.max is not None)
1830

    
1831
    elif arg.max is None:
1832
      raise errors.ProgrammerError("Only the last argument can have max=None")
1833

    
1834
  if check_max:
1835
    # Command with exact number of arguments
1836
    if (min_count is not None and max_count is not None and
1837
        min_count == max_count and len(args) != min_count):
1838
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1839
      return False
1840

    
1841
    # Command with limited number of arguments
1842
    if max_count is not None and len(args) > max_count:
1843
      ToStderr("Error: Command %s expects only %d argument(s)",
1844
               cmd, max_count)
1845
      return False
1846

    
1847
  # Command with some required arguments
1848
  if min_count is not None and len(args) < min_count:
1849
    ToStderr("Error: Command %s expects at least %d argument(s)",
1850
             cmd, min_count)
1851
    return False
1852

    
1853
  return True
1854

    
1855

    
1856
def SplitNodeOption(value):
1857
  """Splits the value of a --node option.
1858

1859
  """
1860
  if value and ":" in value:
1861
    return value.split(":", 1)
1862
  else:
1863
    return (value, None)
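
# Example (illustrative): a --node value of "node1:node2" splits into
# "node1" (primary) and "node2" (secondary), while a plain "node1" yields
# ("node1", None), i.e. no secondary node.
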


def CalculateOSNames(os_name, os_variants):
1867
  """Calculates all the names an OS can be called, according to its variants.
1868

1869
  @type os_name: string
1870
  @param os_name: base name of the os
1871
  @type os_variants: list or None
1872
  @param os_variants: list of supported variants
1873
  @rtype: list
1874
  @return: list of valid names
1875

1876
  """
1877
  if os_variants:
1878
    return ["%s+%s" % (os_name, v) for v in os_variants]
1879
  else:
1880
    return [os_name]
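
# Examples (illustrative values):
#   CalculateOSNames("debian-image", ["squeeze", "wheezy"])
#     -> ["debian-image+squeeze", "debian-image+wheezy"]
#   CalculateOSNames("debian-image", None)
#     -> ["debian-image"]
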


def ParseFields(selected, default):
1884
  """Parses the values of "--field"-like options.
1885

1886
  @type selected: string or None
1887
  @param selected: User-selected options
1888
  @type default: list
1889
  @param default: Default fields
1890

1891
  """
1892
  if selected is None:
1893
    return default
1894

    
1895
  if selected.startswith("+"):
1896
    return default + selected[1:].split(",")
1897

    
1898
  return selected.split(",")
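
# Examples (illustrative), with default fields ["name", "status"]:
#   ParseFields(None, default)        -> ["name", "status"]
#   ParseFields("+oper_ram", default) -> ["name", "status", "oper_ram"]
#   ParseFields("name,os", default)   -> ["name", "os"]
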


UsesRPC = rpc.RunWithRPC
1902

    
1903

    
1904
def AskUser(text, choices=None):
1905
  """Ask the user a question.
1906

1907
  @param text: the question to ask
1908

1909
  @param choices: list with elements tuples (input_char, return_value,
1910
      description); if not given, it will default to: [('y', True,
1911
      'Perform the operation'), ('n', False, 'Do no do the operation')];
1912
      note that the '?' char is reserved for help
1913

1914
  @return: one of the return values from the choices list; if input is
1915
      not possible (i.e. not running with a tty, we return the last
1916
      entry from the list
1917

1918
  """
1919
  if choices is None:
1920
    choices = [("y", True, "Perform the operation"),
1921
               ("n", False, "Do not perform the operation")]
1922
  if not choices or not isinstance(choices, list):
1923
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1924
  for entry in choices:
1925
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1926
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1927

    
1928
  answer = choices[-1][1]
1929
  new_text = []
1930
  for line in text.splitlines():
1931
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1932
  text = "\n".join(new_text)
1933
  try:
1934
    f = file("/dev/tty", "a+")
1935
  except IOError:
1936
    return answer
1937
  try:
1938
    chars = [entry[0] for entry in choices]
1939
    chars[-1] = "[%s]" % chars[-1]
1940
    chars.append("?")
1941
    maps = dict([(entry[0], entry[1]) for entry in choices])
1942
    while True:
1943
      f.write(text)
1944
      f.write("\n")
1945
      f.write("/".join(chars))
1946
      f.write(": ")
1947
      line = f.readline(2).strip().lower()
1948
      if line in maps:
1949
        answer = maps[line]
1950
        break
1951
      elif line == "?":
1952
        for entry in choices:
1953
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1954
        f.write("\n")
1955
        continue
1956
  finally:
1957
    f.close()
1958
  return answer
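
# Illustrative usage (not part of this module): a client script asking a
# question with a custom set of choices; if no tty is available, the last
# entry ("n", i.e. False) is returned as the default answer:
#
#   choices = [("y", True, "Perform the operation"),
#              ("a", "all", "Perform the operation on all items"),
#              ("n", False, "Do not perform the operation")]
#   answer = AskUser("Continue with the operation?", choices)
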


class JobSubmittedException(Exception):
1962
  """Job was submitted, client should exit.
1963

1964
  This exception has one argument, the ID of the job that was
1965
  submitted. The handler should print this ID.
1966

1967
  This is not an error, just a structured way to exit from clients.
1968

1969
  """
1970

    
1971

    
1972
def SendJob(ops, cl=None):
1973
  """Function to submit an opcode without waiting for the results.
1974

1975
  @type ops: list
1976
  @param ops: list of opcodes
1977
  @type cl: luxi.Client
1978
  @param cl: the luxi client to use for communicating with the master;
1979
             if None, a new client will be created
1980

1981
  """
1982
  if cl is None:
1983
    cl = GetClient()
1984

    
1985
  job_id = cl.SubmitJob(ops)
1986

    
1987
  return job_id
1988

    
1989

    
1990
def GenericPollJob(job_id, cbs, report_cbs):
1991
  """Generic job-polling function.
1992

1993
  @type job_id: number
1994
  @param job_id: Job ID
1995
  @type cbs: Instance of L{JobPollCbBase}
1996
  @param cbs: Data callbacks
1997
  @type report_cbs: Instance of L{JobPollReportCbBase}
1998
  @param report_cbs: Reporting callbacks
1999

2000
  """
2001
  prev_job_info = None
2002
  prev_logmsg_serial = None
2003

    
2004
  status = None
2005

    
2006
  while True:
2007
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2008
                                      prev_logmsg_serial)
2009
    if not result:
2010
      # job not found, go away!
2011
      raise errors.JobLost("Job with id %s lost" % job_id)
2012

    
2013
    if result == constants.JOB_NOTCHANGED:
2014
      report_cbs.ReportNotChanged(job_id, status)
2015

    
2016
      # Wait again
2017
      continue
2018

    
2019
    # Split result, a tuple of (field values, log entries)
2020
    (job_info, log_entries) = result
2021
    (status, ) = job_info
2022

    
2023
    if log_entries:
2024
      for log_entry in log_entries:
2025
        (serial, timestamp, log_type, message) = log_entry
2026
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2027
                                    log_type, message)
2028
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2029

    
2030
    # TODO: Handle canceled and archived jobs
2031
    elif status in (constants.JOB_STATUS_SUCCESS,
2032
                    constants.JOB_STATUS_ERROR,
2033
                    constants.JOB_STATUS_CANCELING,
2034
                    constants.JOB_STATUS_CANCELED):
2035
      break
2036

    
2037
    prev_job_info = job_info
2038

    
2039
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2040
  if not jobs:
2041
    raise errors.JobLost("Job with id %s lost" % job_id)
2042

    
2043
  status, opstatus, result = jobs[0]
2044

    
2045
  if status == constants.JOB_STATUS_SUCCESS:
2046
    return result
2047

    
2048
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2049
    raise errors.OpExecError("Job was canceled")
2050

    
2051
  has_ok = False
2052
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2053
    if status == constants.OP_STATUS_SUCCESS:
2054
      has_ok = True
2055
    elif status == constants.OP_STATUS_ERROR:
2056
      errors.MaybeRaise(msg)
2057

    
2058
      if has_ok:
2059
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2060
                                 (idx, msg))
2061

    
2062
      raise errors.OpExecError(str(msg))
2063

    
2064
  # default failure mode
2065
  raise errors.OpExecError(result)
2066

    
2067

    
2068
class JobPollCbBase:
2069
  """Base class for L{GenericPollJob} callbacks.
2070

2071
  """
2072
  def __init__(self):
2073
    """Initializes this class.
2074

2075
    """
2076

    
2077
  def WaitForJobChangeOnce(self, job_id, fields,
2078
                           prev_job_info, prev_log_serial):
2079
    """Waits for changes on a job.
2080

2081
    """
2082
    raise NotImplementedError()
2083

    
2084
  def QueryJobs(self, job_ids, fields):
2085
    """Returns the selected fields for the selected job IDs.
2086

2087
    @type job_ids: list of numbers
2088
    @param job_ids: Job IDs
2089
    @type fields: list of strings
2090
    @param fields: Fields
2091

2092
    """
2093
    raise NotImplementedError()
2094

    
2095

    
2096
class JobPollReportCbBase:
2097
  """Base class for L{GenericPollJob} reporting callbacks.
2098

2099
  """
2100
  def __init__(self):
2101
    """Initializes this class.
2102

2103
    """
2104

    
2105
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2106
    """Handles a log message.
2107

2108
    """
2109
    raise NotImplementedError()
2110

    
2111
  def ReportNotChanged(self, job_id, status):
2112
    """Called for if a job hasn't changed in a while.
2113

2114
    @type job_id: number
2115
    @param job_id: Job ID
2116
    @type status: string or None
2117
    @param status: Job status if available
2118

2119
    """
2120
    raise NotImplementedError()
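
# Illustrative sketch (an assumption, not part of this module): a minimal
# custom reporter for GenericPollJob that logs messages instead of printing
# them and silently ignores "not changed" notifications; compare with
# FeedbackFnJobPollReportCb and StdioJobPollReportCb below:
#
#   class _QuietJobPollReportCb(JobPollReportCbBase):
#     def ReportLogMessage(self, job_id, serial, timestamp,
#                          log_type, log_msg):
#       logging.info("Job %s: %s", job_id,
#                    FormatLogMessage(log_type, log_msg))
#
#     def ReportNotChanged(self, job_id, status):
#       pass
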


class _LuxiJobPollCb(JobPollCbBase):
2124
  def __init__(self, cl):
2125
    """Initializes this class.
2126

2127
    """
2128
    JobPollCbBase.__init__(self)
2129
    self.cl = cl
2130

    
2131
  def WaitForJobChangeOnce(self, job_id, fields,
2132
                           prev_job_info, prev_log_serial):
2133
    """Waits for changes on a job.
2134

2135
    """
2136
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2137
                                        prev_job_info, prev_log_serial)
2138

    
2139
  def QueryJobs(self, job_ids, fields):
2140
    """Returns the selected fields for the selected job IDs.
2141

2142
    """
2143
    return self.cl.QueryJobs(job_ids, fields)
2144

    
2145

    
2146
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2147
  def __init__(self, feedback_fn):
2148
    """Initializes this class.
2149

2150
    """
2151
    JobPollReportCbBase.__init__(self)
2152

    
2153
    self.feedback_fn = feedback_fn
2154

    
2155
    assert callable(feedback_fn)
2156

    
2157
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2158
    """Handles a log message.
2159

2160
    """
2161
    self.feedback_fn((timestamp, log_type, log_msg))
2162

    
2163
  def ReportNotChanged(self, job_id, status):
2164
    """Called if a job hasn't changed in a while.
2165

2166
    """
2167
    # Ignore
2168

    
2169

    
2170
class StdioJobPollReportCb(JobPollReportCbBase):
2171
  def __init__(self):
2172
    """Initializes this class.
2173

2174
    """
2175
    JobPollReportCbBase.__init__(self)
2176

    
2177
    self.notified_queued = False
2178
    self.notified_waitlock = False
2179

    
2180
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2181
    """Handles a log message.
2182

2183
    """
2184
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2185
             FormatLogMessage(log_type, log_msg))
2186

    
2187
  def ReportNotChanged(self, job_id, status):
2188
    """Called if a job hasn't changed in a while.
2189

2190
    """
2191
    if status is None:
2192
      return
2193

    
2194
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2195
      ToStderr("Job %s is waiting in queue", job_id)
2196
      self.notified_queued = True
2197

    
2198
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2199
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2200
      self.notified_waitlock = True
2201

    
2202

    
2203
def FormatLogMessage(log_type, log_msg):
2204
  """Formats a job message according to its type.
2205

2206
  """
2207
  if log_type != constants.ELOG_MESSAGE:
2208
    log_msg = str(log_msg)
2209

    
2210
  return utils.SafeEncode(log_msg)
2211

    
2212

    
2213
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2214
  """Function to poll for the result of a job.
2215

2216
  @type job_id: job identifier
2217
  @param job_id: the job to poll for results
2218
  @type cl: luxi.Client
2219
  @param cl: the luxi client to use for communicating with the master;
2220
             if None, a new client will be created
2221

2222
  """
2223
  if cl is None:
2224
    cl = GetClient()
2225

    
2226
  if reporter is None:
2227
    if feedback_fn:
2228
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2229
    else:
2230
      reporter = StdioJobPollReportCb()
2231
  elif feedback_fn:
2232
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2233

    
2234
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
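
# Illustrative usage (not part of this module): submitting a job without
# waiting and polling for its result later; "op" stands for any opcode
# object built by a client script:
#
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   ToStdout("Submitted job %s", job_id)
#   results = PollJob(job_id, cl=cl)  # prints job log messages while waiting
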


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2238
  """Legacy function to submit an opcode.
2239

2240
  This is just a simple wrapper over the construction of the processor
2241
  instance. It should be extended to better handle feedback and
2242
  interaction functions.
2243

2244
  """
2245
  if cl is None:
2246
    cl = GetClient()
2247

    
2248
  SetGenericOpcodeOpts([op], opts)
2249

    
2250
  job_id = SendJob([op], cl=cl)
2251

    
2252
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2253
                       reporter=reporter)
2254

    
2255
  return op_results[0]
2256

    
2257

    
2258
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2259
  """Wrapper around SubmitOpCode or SendJob.
2260

2261
  This function will decide, based on the 'opts' parameter, whether to
2262
  submit and wait for the result of the opcode (and return it), or
2263
  whether to just send the job and print its identifier. It is used in
2264
  order to simplify the implementation of the '--submit' option.
2265

2266
  It will also process the opcodes if we're sending them via SendJob
2267
  (otherwise SubmitOpCode does it).
2268

2269
  """
2270
  if opts and opts.submit_only:
2271
    job = [op]
2272
    SetGenericOpcodeOpts(job, opts)
2273
    job_id = SendJob(job, cl=cl)
2274
    raise JobSubmittedException(job_id)
2275
  else:
2276
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2277

    
2278

    
2279
def _InitReasonTrail(op, opts):
2280
  """Builds the first part of the reason trail
2281

2282
  Builds the initial part of the reason trail, adding the user provided reason
2283
  (if it exists) and the name of the command starting the operation.
2284

2285
  @param op: the opcode the reason trail will be added to
2286
  @param opts: the command line options selected by the user
2287

2288
  """
2289
  assert len(sys.argv) >= 2
2290
  trail = []
2291

    
2292
  if opts.reason:
2293
    trail.append((constants.OPCODE_REASON_SRC_USER,
2294
                  opts.reason,
2295
                  utils.EpochNano()))
2296

    
2297
  binary = os.path.basename(sys.argv[0])
2298
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2299
  command = sys.argv[1]
2300
  trail.append((source, command, utils.EpochNano()))
2301
  op.reason = trail
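
# Illustrative result (values are examples only): for a hypothetical
# invocation "gnt-instance reboot --reason=maintenance inst1", the trail
# built above would be
#   [(constants.OPCODE_REASON_SRC_USER, "maintenance", <EpochNano() value>),
#    ("%s:gnt-instance" % constants.OPCODE_REASON_SRC_CLIENT, "reboot",
#     <EpochNano() value>)]
# i.e. the optional user-supplied reason followed by the client binary and
# the command that started the operation, each with a nanosecond timestamp.
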


def SetGenericOpcodeOpts(opcode_list, options):
2305
  """Processor for generic options.
2306

2307
  This function updates the given opcodes based on generic command
2308
  line options (like debug, dry-run, etc.).
2309

2310
  @param opcode_list: list of opcodes
2311
  @param options: command line options or None
2312
  @return: None (in-place modification)
2313

2314
  """
2315
  if not options:
2316
    return
2317
  for op in opcode_list:
2318
    op.debug_level = options.debug
2319
    if hasattr(options, "dry_run"):
2320
      op.dry_run = options.dry_run
2321
    if getattr(options, "priority", None) is not None:
2322
      op.priority = options.priority
2323
    _InitReasonTrail(op, options)
2324

    
2325

    
2326
def GetClient(query=False):
2327
  """Connects to the a luxi socket and returns a client.
2328

2329
  @type query: boolean
2330
  @param query: this signifies that the client will only be
2331
      used for queries; if the build-time parameter
2332
      enable-split-queries is enabled, then the client will be
2333
      connected to the query socket instead of the masterd socket
2334

2335
  """
2336
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2337
  if override_socket:
2338
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2339
      address = pathutils.MASTER_SOCKET
2340
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2341
      address = pathutils.QUERY_SOCKET
2342
    else:
2343
      address = override_socket
2344
  elif query and constants.ENABLE_SPLIT_QUERY:
2345
    address = pathutils.QUERY_SOCKET
2346
  else:
2347
    address = None
2348
  # TODO: Cache object?
2349
  try:
2350
    client = luxi.Client(address=address)
2351
  except luxi.NoMasterError:
2352
    ss = ssconf.SimpleStore()
2353

    
2354
    # Try to read ssconf file
2355
    try:
2356
      ss.GetMasterNode()
2357
    except errors.ConfigurationError:
2358
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2359
                                 " not part of a cluster",
2360
                                 errors.ECODE_INVAL)
2361

    
2362
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2363
    if master != myself:
2364
      raise errors.OpPrereqError("This is not the master node, please connect"
2365
                                 " to node '%s' and rerun the command" %
2366
                                 master, errors.ECODE_INVAL)
2367
    raise
2368
  return client
2369

    
2370

    
2371
def FormatError(err):
2372
  """Return a formatted error message for a given error.
2373

2374
  This function takes an exception instance and returns a tuple
2375
  consisting of two values: first, the recommended exit code, and
2376
  second, a string describing the error message (not
2377
  newline-terminated).
2378

2379
  """
2380
  retcode = 1
2381
  obuf = StringIO()
2382
  msg = str(err)
2383
  if isinstance(err, errors.ConfigurationError):
2384
    txt = "Corrupt configuration file: %s" % msg
2385
    logging.error(txt)
2386
    obuf.write(txt + "\n")
2387
    obuf.write("Aborting.")
2388
    retcode = 2
2389
  elif isinstance(err, errors.HooksAbort):
2390
    obuf.write("Failure: hooks execution failed:\n")
2391
    for node, script, out in err.args[0]:
2392
      if out:
2393
        obuf.write("  node: %s, script: %s, output: %s\n" %
2394
                   (node, script, out))
2395
      else:
2396
        obuf.write("  node: %s, script: %s (no output)\n" %
2397
                   (node, script))
2398
  elif isinstance(err, errors.HooksFailure):
2399
    obuf.write("Failure: hooks general failure: %s" % msg)
2400
  elif isinstance(err, errors.ResolverError):
2401
    this_host = netutils.Hostname.GetSysName()
2402
    if err.args[0] == this_host:
2403
      msg = "Failure: can't resolve my own hostname ('%s')"
2404
    else:
2405
      msg = "Failure: can't resolve hostname '%s'"
2406
    obuf.write(msg % err.args[0])
2407
  elif isinstance(err, errors.OpPrereqError):
2408
    if len(err.args) == 2:
2409
      obuf.write("Failure: prerequisites not met for this"
2410
                 " operation:\nerror type: %s, error details:\n%s" %
2411
                 (err.args[1], err.args[0]))
2412
    else:
2413
      obuf.write("Failure: prerequisites not met for this"
2414
                 " operation:\n%s" % msg)
2415
  elif isinstance(err, errors.OpExecError):
2416
    obuf.write("Failure: command execution error:\n%s" % msg)
2417
  elif isinstance(err, errors.TagError):
2418
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2419
  elif isinstance(err, errors.JobQueueDrainError):
2420
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2421
               " accept new requests\n")
2422
  elif isinstance(err, errors.JobQueueFull):
2423
    obuf.write("Failure: the job queue is full and doesn't accept new"
2424
               " job submissions until old jobs are archived\n")
2425
  elif isinstance(err, errors.TypeEnforcementError):
2426
    obuf.write("Parameter Error: %s" % msg)
2427
  elif isinstance(err, errors.ParameterError):
2428
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2429
  elif isinstance(err, luxi.NoMasterError):
2430
    if err.args[0] == pathutils.MASTER_SOCKET:
2431
      daemon = "the master daemon"
2432
    elif err.args[0] == pathutils.QUERY_SOCKET:
2433
      daemon = "the config daemon"
2434
    else:
2435
      daemon = "socket '%s'" % str(err.args[0])
2436
    obuf.write("Cannot communicate with %s.\nIs the process running"
2437
               " and listening for connections?" % daemon)
2438
  elif isinstance(err, luxi.TimeoutError):
2439
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2440
               " been submitted and will continue to run even if the call"
2441
               " timed out. Useful commands in this situation are \"gnt-job"
2442
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2443
    obuf.write(msg)
2444
  elif isinstance(err, luxi.PermissionError):
2445
    obuf.write("It seems you don't have permissions to connect to the"
2446
               " master daemon.\nPlease retry as a different user.")
2447
  elif isinstance(err, luxi.ProtocolError):
2448
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2449
               "%s" % msg)
2450
  elif isinstance(err, errors.JobLost):
2451
    obuf.write("Error checking job status: %s" % msg)
2452
  elif isinstance(err, errors.QueryFilterParseError):
2453
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2454
    obuf.write("\n".join(err.GetDetails()))
2455
  elif isinstance(err, errors.GenericError):
2456
    obuf.write("Unhandled Ganeti error: %s" % msg)
2457
  elif isinstance(err, JobSubmittedException):
2458
    obuf.write("JobID: %s\n" % err.args[0])
2459
    retcode = 0
2460
  else:
2461
    obuf.write("Unhandled exception: %s" % msg)
2462
  return retcode, obuf.getvalue().rstrip("\n")
2463

    
2464

    
2465
def GenericMain(commands, override=None, aliases=None,
2466
                env_override=frozenset()):
2467
  """Generic main function for all the gnt-* commands.
2468

2469
  @param commands: a dictionary with a special structure, see the design doc
2470
                   for command line handling.
2471
  @param override: if not None, we expect a dictionary with keys that will
2472
                   override command line options; this can be used to pass
2473
                   options from the scripts to generic functions
2474
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
2475
  @param env_override: list of environment names which are allowed to submit
2476
                       default args for commands
2477

2478
  """
2479
  # save the program name and the entire command line for later logging
2480
  if sys.argv:
2481
    binary = os.path.basename(sys.argv[0])
2482
    if not binary:
2483
      binary = sys.argv[0]
2484

    
2485
    if len(sys.argv) >= 2:
2486
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2487
    else:
2488
      logname = binary
2489

    
2490
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2491
  else:
2492
    binary = "<unknown program>"
2493
    cmdline = "<unknown>"
2494

    
2495
  if aliases is None:
2496
    aliases = {}
2497

    
2498
  try:
2499
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2500
                                       env_override)
2501
  except _ShowVersion:
2502
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2503
             constants.RELEASE_VERSION)
2504
    return constants.EXIT_SUCCESS
2505
  except _ShowUsage, err:
2506
    for line in _FormatUsage(binary, commands):
2507
      ToStdout(line)
2508

    
2509
    if err.exit_error:
2510
      return constants.EXIT_FAILURE
2511
    else:
2512
      return constants.EXIT_SUCCESS
2513
  except errors.ParameterError, err:
2514
    result, err_msg = FormatError(err)
2515
    ToStderr(err_msg)
2516
    return 1
2517

    
2518
  if func is None: # parse error
2519
    return 1
2520

    
2521
  if override is not None:
2522
    for key, val in override.iteritems():
2523
      setattr(options, key, val)
2524

    
2525
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2526
                     stderr_logging=True)
2527

    
2528
  logging.info("Command line: %s", cmdline)
2529

    
2530
  try:
2531
    result = func(options, args)
2532
  except (errors.GenericError, luxi.ProtocolError,
2533
          JobSubmittedException), err:
2534
    result, err_msg = FormatError(err)
2535
    logging.exception("Error during command processing")
2536
    ToStderr(err_msg)
2537
  except KeyboardInterrupt:
2538
    result = constants.EXIT_FAILURE
2539
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2540
             " might have been submitted and"
2541
             " will continue to run in the background.")
2542
  except IOError, err:
2543
    if err.errno == errno.EPIPE:
2544
      # our terminal went away, we'll exit
2545
      sys.exit(constants.EXIT_FAILURE)
2546
    else:
2547
      raise
2548

    
2549
  return result
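
# Illustrative sketch (not part of this module): a gnt-* client script
# normally just builds its command table and delegates to GenericMain, e.g.
#
#   def Main():
#     return GenericMain(commands, aliases=aliases,
#                        env_override=ENV_OVERRIDE)
#
# where "commands" follows the 5-tuple structure sketched after _ParseArgs
# above and ENV_OVERRIDE (a hypothetical name) is a frozenset of command
# names that may receive default arguments from the environment.
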


def ParseNicOption(optvalue):
2553
  """Parses the value of the --net option(s).
2554

2555
  """
2556
  try:
2557
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2558
  except (TypeError, ValueError), err:
2559
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2560
                               errors.ECODE_INVAL)
2561

    
2562
  nics = [{}] * nic_max
2563
  for nidx, ndict in optvalue:
2564
    nidx = int(nidx)
2565

    
2566
    if not isinstance(ndict, dict):
2567
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2568
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2569

    
2570
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2571

    
2572
    nics[nidx] = ndict
2573

    
2574
  return nics
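
# Example (illustrative): repeated --net options arrive as (index, dict)
# pairs, e.g.
#   ParseNicOption([("0", {"ip": "192.0.2.10"}), ("2", {"link": "br0"})])
# returns [{"ip": "192.0.2.10"}, {}, {"link": "br0"}], i.e. three NICs with
# the unspecified NIC 1 left at its defaults (empty dict).
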


def GenericInstanceCreate(mode, opts, args):
2578
  """Add an instance to the cluster via either creation or import.
2579

2580
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2581
  @param opts: the command line options selected by the user
2582
  @type args: list
2583
  @param args: should contain only one element, the new instance name
2584
  @rtype: int
2585
  @return: the desired exit code
2586

2587
  """
2588
  instance = args[0]
2589

    
2590
  (pnode, snode) = SplitNodeOption(opts.node)
2591

    
2592
  hypervisor = None
2593
  hvparams = {}
2594
  if opts.hypervisor:
2595
    hypervisor, hvparams = opts.hypervisor
2596

    
2597
  if opts.nics:
2598
    nics = ParseNicOption(opts.nics)
2599
  elif opts.no_nics:
2600
    # no nics
2601
    nics = []
2602
  elif mode == constants.INSTANCE_CREATE:
2603
    # default of one nic, all auto
2604
    nics = [{}]
2605
  else:
2606
    # mode == import
2607
    nics = []
2608

    
2609
  if opts.disk_template == constants.DT_DISKLESS:
2610
    if opts.disks or opts.sd_size is not None:
2611
      raise errors.OpPrereqError("Diskless instance but disk"
2612
                                 " information passed", errors.ECODE_INVAL)
2613
    disks = []
2614
  else:
2615
    if (not opts.disks and not opts.sd_size
2616
        and mode == constants.INSTANCE_CREATE):
2617
      raise errors.OpPrereqError("No disk information specified",
2618
                                 errors.ECODE_INVAL)
2619
    if opts.disks and opts.sd_size is not None:
2620
      raise errors.OpPrereqError("Please use either the '--disk' or"
2621
                                 " '-s' option", errors.ECODE_INVAL)
2622
    if opts.sd_size is not None:
2623
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2624

    
2625
    if opts.disks:
2626
      try:
2627
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2628
      except ValueError, err:
2629
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2630
                                   errors.ECODE_INVAL)
2631
      disks = [{}] * disk_max
2632
    else:
2633
      disks = []
2634
    for didx, ddict in opts.disks:
2635
      didx = int(didx)
2636
      if not isinstance(ddict, dict):
2637
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2638
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2639
      elif constants.IDISK_SIZE in ddict:
2640
        if constants.IDISK_ADOPT in ddict:
2641
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2642
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2643
        try:
2644
          ddict[constants.IDISK_SIZE] = \
2645
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2646
        except ValueError, err:
2647
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2648
                                     (didx, err), errors.ECODE_INVAL)
2649
      elif constants.IDISK_ADOPT in ddict:
2650
        if mode == constants.INSTANCE_IMPORT:
2651
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2652
                                     " import", errors.ECODE_INVAL)
2653
        ddict[constants.IDISK_SIZE] = 0
2654
      else:
2655
        raise errors.OpPrereqError("Missing size or adoption source for"
2656
                                   " disk %d" % didx, errors.ECODE_INVAL)
2657
      disks[didx] = ddict
2658

    
2659
  if opts.tags is not None:
2660
    tags = opts.tags.split(",")
2661
  else:
2662
    tags = []
2663

    
2664
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2665
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2666

    
2667
  if mode == constants.INSTANCE_CREATE:
2668
    start = opts.start
2669
    os_type = opts.os
2670
    force_variant = opts.force_variant
2671
    src_node = None
2672
    src_path = None
2673
    no_install = opts.no_install
2674
    identify_defaults = False
2675
  elif mode == constants.INSTANCE_IMPORT:
2676
    start = False
2677
    os_type = None
2678
    force_variant = False
2679
    src_node = opts.src_node
2680
    src_path = opts.src_dir
2681
    no_install = None
2682
    identify_defaults = opts.identify_defaults
2683
  else:
2684
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2685

    
2686
  op = opcodes.OpInstanceCreate(instance_name=instance,
2687
                                disks=disks,
2688
                                disk_template=opts.disk_template,
2689
                                nics=nics,
2690
                                conflicts_check=opts.conflicts_check,
2691
                                pnode=pnode, snode=snode,
2692
                                ip_check=opts.ip_check,
2693
                                name_check=opts.name_check,
2694
                                wait_for_sync=opts.wait_for_sync,
2695
                                file_storage_dir=opts.file_storage_dir,
2696
                                file_driver=opts.file_driver,
2697
                                iallocator=opts.iallocator,
2698
                                hypervisor=hypervisor,
2699
                                hvparams=hvparams,
2700
                                beparams=opts.beparams,
2701
                                osparams=opts.osparams,
2702
                                mode=mode,
2703
                                start=start,
2704
                                os_type=os_type,
2705
                                force_variant=force_variant,
2706
                                src_node=src_node,
2707
                                src_path=src_path,
2708
                                tags=tags,
2709
                                no_install=no_install,
2710
                                identify_defaults=identify_defaults,
2711
                                ignore_ipolicy=opts.ignore_ipolicy)
2712

    
2713
  SubmitOrSend(op, opts)
2714
  return 0
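
# Illustrative usage (an assumption about the calling side, not shown here):
# client scripts wrap this helper for both creation and import, e.g.
#
#   def AddInstance(opts, args):
#     """Add an instance to the cluster."""
#     return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args)
#
# with the corresponding command entry supplying COMMON_CREATE_OPTS (defined
# above) plus any command-specific options.
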


class _RunWhileClusterStoppedHelper:
2718
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2719

2720
  """
2721
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2722
    """Initializes this class.
2723

2724
    @type feedback_fn: callable
2725
    @param feedback_fn: Feedback function
2726
    @type cluster_name: string
2727
    @param cluster_name: Cluster name
2728
    @type master_node: string
2729
    @param master_node: Master node name
2730
    @type online_nodes: list
2731
    @param online_nodes: List of names of online nodes
2732

2733
    """
2734
    self.feedback_fn = feedback_fn
2735
    self.cluster_name = cluster_name
2736
    self.master_node = master_node
2737
    self.online_nodes = online_nodes
2738

    
2739
    self.ssh = ssh.SshRunner(self.cluster_name)
2740

    
2741
    self.nonmaster_nodes = [name for name in online_nodes
2742
                            if name != master_node]
2743

    
2744
    assert self.master_node not in self.nonmaster_nodes
2745

    
2746
  def _RunCmd(self, node_name, cmd):
2747
    """Runs a command on the local or a remote machine.
2748

2749
    @type node_name: string
2750
    @param node_name: Machine name
2751
    @type cmd: list
2752
    @param cmd: Command
2753

2754
    """
2755
    if node_name is None or node_name == self.master_node:
2756
      # No need to use SSH
2757
      result = utils.RunCmd(cmd)
2758
    else:
2759
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2760
                            utils.ShellQuoteArgs(cmd))
2761

    
2762
    if result.failed:
2763
      errmsg = ["Failed to run command %s" % result.cmd]
2764
      if node_name:
2765
        errmsg.append("on node %s" % node_name)
2766
      errmsg.append(": exitcode %s and error %s" %
2767
                    (result.exit_code, result.output))
2768
      raise errors.OpExecError(" ".join(errmsg))
2769

    
2770
  def Call(self, fn, *args):
2771
    """Call function while all daemons are stopped.
2772

2773
    @type fn: callable
2774
    @param fn: Function to be called
2775

2776
    """
2777
    # Pause watcher by acquiring an exclusive lock on watcher state file
2778
    self.feedback_fn("Blocking watcher")
2779
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2780
    try:
2781
      # TODO: Currently, this just blocks. There's no timeout.
2782
      # TODO: Should it be a shared lock?
2783
      watcher_block.Exclusive(blocking=True)
2784

    
2785
      # Stop master daemons, so that no new jobs can come in and all running
2786
      # ones are finished
2787
      self.feedback_fn("Stopping master daemons")
2788
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2789
      try:
2790
        # Stop daemons on all nodes
2791
        for node_name in self.online_nodes:
2792
          self.feedback_fn("Stopping daemons on %s" % node_name)
2793
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2794

    
2795
        # All daemons are shut down now
2796
        try:
2797
          return fn(self, *args)
2798
        except Exception, err:
2799
          _, errmsg = FormatError(err)
2800
          logging.exception("Caught exception")
2801
          self.feedback_fn(errmsg)
2802
          raise
2803
      finally:
2804
        # Start cluster again, master node last
2805
        for node_name in self.nonmaster_nodes + [self.master_node]:
2806
          self.feedback_fn("Starting daemons on %s" % node_name)
2807
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2808
    finally:
2809
      # Resume watcher
2810
      watcher_block.Close()
2811

    
2812

    
2813
def RunWhileClusterStopped(feedback_fn, fn, *args):
2814
  """Calls a function while all cluster daemons are stopped.
2815

2816
  @type feedback_fn: callable
2817
  @param feedback_fn: Feedback function
2818
  @type fn: callable
2819
  @param fn: Function to be called when daemons are stopped
2820

2821
  """
2822
  feedback_fn("Gathering cluster information")
2823

    
2824
  # This ensures we're running on the master daemon
2825
  cl = GetClient()
2826

    
2827
  (cluster_name, master_node) = \
2828
    cl.QueryConfigValues(["cluster_name", "master_node"])
2829

    
2830
  online_nodes = GetOnlineNodes([], cl=cl)
2831

    
2832
  # Don't keep a reference to the client. The master daemon will go away.
2833
  del cl
2834

    
2835
  assert master_node in online_nodes
2836

    
2837
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2838
                                       online_nodes).Call(fn, *args)
2839

    
2840

    
2841
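# Illustrative sketch (not part of the original module): one way a caller
# might use RunWhileClusterStopped. The callback receives the helper object
# as its first argument, so it can run commands on nodes while all daemons
# are down; the command used here is purely hypothetical.
def _ExampleRunWhileClusterStopped():
  def _DoMaintenance(helper):
    # Runs with every cluster daemon stopped; pylint: disable=W0212
    helper._RunCmd(None, ["/bin/true"])
  RunWhileClusterStopped(ToStdout, _DoMaintenance)

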
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


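# Illustrative sketch (not part of the original module): GenerateTable with
# the "smart" column-width algorithm (separator=None). The field names and
# data rows are hypothetical.
def _ExampleGenerateTable():
  headers = {"name": "Node", "dfree": "DFree"}
  data = [["node1.example.com", 102400], ["node2.example.com", 2048]]
  for line in GenerateTable(headers, ["name", "dfree"], None, data,
                            numfields=["dfree"], unitfields=["dfree"]):
    ToStdout(line)

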
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


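# Illustrative sketch (not part of the original module): feeding a hand-built
# response to FormatQueryResult. The keyword arguments used to construct
# QueryResponse and QueryFieldDefinition are assumed from the attribute
# accesses above; the field set and data are hypothetical.
def _ExampleFormatQueryResult():
  fields = [
    objects.QueryFieldDefinition(name="name", title="Name",
                                 kind=constants.QFT_TEXT, doc="Instance name"),
    objects.QueryFieldDefinition(name="oper_ram", title="Memory",
                                 kind=constants.QFT_UNIT, doc="Memory in use"),
    ]
  data = [
    [(constants.RS_NORMAL, "instance1"), (constants.RS_NORMAL, 1024)],
    ]
  (status, lines) = FormatQueryResult(objects.QueryResponse(fields=fields,
                                                            data=data),
                                      header=True)
  for line in lines:
    ToStdout(line)
  return status == QR_NORMAL

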
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


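# Illustrative sketch (not part of the original module): listing nodes the
# way "gnt-node list" does, via the generic helper. The field names follow
# the node query; the node selection is hypothetical.
def _ExampleGenericList():
  return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"],
                     ["node1.example.com"], None, None, True,
                     namefield="name")

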
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


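# Illustrative sketch (not part of the original module): showing all known
# node query fields, similar to what "gnt-node list-fields" prints.
def _ExampleGenericListFields():
  return GenericListFields(constants.QR_NODE, [], None, True)

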
class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


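# Illustrative sketch (not part of the original module): building a small
# table directly with TableColumn and FormatTable; the column set and row
# data are hypothetical.
def _ExampleFormatTable():
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Size", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["disk0", 1024], ["disk1", 20480]]
  for line in FormatTable(rows, columns, True, None):
    ToStdout(line)

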
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


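# Illustrative sketch (not part of the original module): the suffixes
# accepted by ParseTimespec, all converted to seconds.
def _ExampleParseTimespec():
  assert ParseTimespec("30") == 30
  assert ParseTimespec("2m") == 120
  assert ParseTimespec("1h") == 3600
  assert ParseTimespec("1d") == 86400
  assert ParseTimespec("1w") == 604800

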
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


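# Illustrative sketch (not part of the original module): collecting the
# secondary IPs of all online, non-master nodes, e.g. for replication
# traffic.
def _ExampleGetOnlineNodes():
  return GetOnlineNodes([], secondary_ips=True, filter_master=True)

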
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


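# Illustrative sketch (not part of the original module): submitting several
# jobs at once and waiting for all of them. The opcode and the instance
# names are hypothetical.
def _ExampleJobExecutor(opts):
  jex = JobExecutor(opts=opts)
  for name in ["instance1", "instance2"]:
    jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
  results = jex.GetResults()
  return compat.all(success for (success, _) in results)

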
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


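# Illustrative sketch (not part of the original module): parameters set
# explicitly are shown verbatim, inherited ones are marked as defaults. The
# parameter names are hypothetical.
def _ExampleFormatParamsDictInfo():
  custom = {"maxmem": 512}
  actual = {"maxmem": 512, "minmem": 256}
  # Returns {"maxmem": "512", "minmem": "default (256)"}
  return FormatParamsDictInfo(custom, actual)

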
def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def _PrintSpecsParameters(buf, specs):
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
  buf.write(",".join(values))


def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)


def ConfirmOperation(names, list_type, text, extra=""):
3820
  """Ask the user to confirm an operation on a list of list_type.
3821

3822
  This function is used to request confirmation for doing an operation
3823
  on a given list of list_type.
3824

3825
  @type names: list
3826
  @param names: the list of names that we display when
3827
      we ask for confirmation
3828
  @type list_type: str
3829
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3830
  @type text: str
3831
  @param text: the operation that the user should confirm
3832
  @rtype: boolean
3833
  @return: True or False depending on user's confirmation.
3834

3835
  """
3836
  count = len(names)
3837
  msg = ("The %s will operate on %d %s.\n%s"
3838
         "Do you want to continue?" % (text, count, list_type, extra))
3839
  affected = (("\nAffected %s:\n" % list_type) +
3840
              "\n".join(["  %s" % name for name in names]))
3841

    
3842
  choices = [("y", True, "Yes, execute the %s" % text),
3843
             ("n", False, "No, abort the %s" % text)]
3844

    
3845
  if count > 20:
3846
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3847
    question = msg
3848
  else:
3849
    question = msg + affected
3850

    
3851
  choice = AskUser(question, choices)
3852
  if choice == "v":
3853
    choices.pop(1)
3854
    choice = AskUser(msg + affected, choices)
3855
  return choice
3856

    
3857

    
3858
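# Illustrative sketch (not part of the original module): asking for
# confirmation before acting on a couple of instances; the names and the
# operation text are hypothetical.
def _ExampleConfirmOperation():
  return ConfirmOperation(["instance1", "instance2"], "instances", "shutdown")

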
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, required):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret


def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  ret = None
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
      len(minmax_ispecs[0]) == 1):
    for (key, spec) in minmax_ispecs[0].items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


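# Illustrative sketch (not part of the original module): building a
# cluster-level policy from --ipolicy-std-specs style values; the sizes,
# ratio and disk templates are hypothetical.
def _ExampleCreateIPolicyFromOpts():
  return CreateIPolicyFromOpts(
    std_ispecs={constants.ISPEC_MEM_SIZE: "512M",
                constants.ISPEC_CPU_COUNT: 2},
    ipolicy_disk_templates=[constants.DT_PLAIN, constants.DT_DRBD8],
    ipolicy_vcpu_ratio=4.0,
    fill_all=True)

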
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
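

# Illustrative sketch (not part of the original module): the three element
# kinds accepted by PrintGenericInfo, printed as YAML-like output; the data
# shown is hypothetical.
def _ExamplePrintGenericInfo():
  PrintGenericInfo([
    ("cluster", "cluster.example.com"),
    ("enabled hypervisors", ["kvm", "xen-pvm"]),
    ("beparams", {"maxmem": "512", "minmem": "256"}),
    ])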