#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
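
# Illustrative note (added commentary, not part of the upstream module): the
# min/max attributes of _Argument express argument arity, so ARGS_ONE_NODE
# above means "exactly one node name", while a plain ArgNode() with the
# defaults min=0, max=None accepts any number of node names, including none.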


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
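
# Illustrative example (hypothetical values, added commentary): with
# opts.tag_type == constants.TAG_INSTANCE and args == ["instance1.example.com",
# "mytag"], the call returns (constants.TAG_INSTANCE, "instance1.example.com")
# and leaves args == ["mytag"]; for TAG_CLUSTER the name is None and args is
# not modified.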


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
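
# Illustrative example (hypothetical file, added commentary): if
# opts.tags_source names a file containing the lines "web" and "prod", both
# stripped tag names are appended to args; "-" reads the names from stdin
# instead, and tags_source=None leaves args untouched.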


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  the cases of tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  the cases of tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  the cases of tag objects (cluster, node group, node, network,
  instance). The opts argument is expected to contain a tag_type
  field denoting what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
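
# Illustrative example (added commentary, assuming utils.ParseUnit's default
# MiB-based units): check_unit(None, "--os-size", "512") returns 512 and
# check_unit(None, "--os-size", "4g") returns 4096, while a malformed value
# such as "12x" surfaces as an OptionValueError so optparse can report it.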


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
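
# Illustrative example (hypothetical values, added commentary): with
# parse_prefixes=True,
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus", True)
# returns {"memory": "512", "auto_balance": False, "vcpus": None}; a bare key
# with no "=" and no prefix becomes True, and duplicate keys raise
# ParameterError.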


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
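
# Illustrative example (hypothetical values, added commentary):
#   _SplitIdentKeyVal("-D", "drbd:resync-rate=100", True)
# returns ("drbd", {"resync-rate": "100"}), while "no_drbd" returns
# ("drbd", False); identifiers such as "-1" are not treated as removals,
# because the UN_PREFIX branch skips identifiers whose first character after
# the prefix is a digit.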


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_list_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val" options.

  @rtype: dict of dicts
  @return: {ident: {key: val, key: val}, ident: {key: val}}

  """
  return _SplitListKeyVal(opt, value)
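
# Illustrative example (hypothetical values, added commentary): an option of
# type "listidentkeyval" such as --ipolicy-bounds-specs parses
# "min:memory-size=128/max:memory-size=4096" into
# {"min": {"memory-size": "128"}, "max": {"memory-size": "4096"}}; empty
# sections and duplicated identifiers raise ParameterError.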


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
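
# Illustrative example (added commentary): options declared with type="bool"
# accept yes/no style values case-insensitively, e.g. "--drained yes" yields
# True and "--drained no" yields False; any other value is rejected with
# ParameterError.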


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
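
# Illustrative example (added commentary): for a "maybefloat" option such as
# --ipolicy-vcpu-ratio the literal string "default" is passed through
# unchanged, while a value like "2.5" is converted to the float 2.5.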


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "listidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["listidentkeyval"] = check_list_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
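
# Minimal illustrative sketch (hypothetical option, added commentary, not part
# of the upstream module): the custom types registered above are used like any
# other optparse type, e.g.
#
#   EXAMPLE_PARAMS_OPT = cli_option("--example-params", dest="example_params",
#                                   type="keyval", default={},
#                                   help="Hypothetical key=value parameters")
#
# which makes optparse run check_key_val on the value and store the resulting
# dict in options.example_params.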


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="listidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))


CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]


class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error


class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  """


def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s   %s" % (mlen, "", line)

  yield ""


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
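
# Illustrative note (the argument definition below is hypothetical): a command
# declared with args_def=[ArgInstance(min=1, max=None)] accepts one or more
# instance names, while an empty argument list makes _CheckArguments print an
# error and return False.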


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
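
# Illustrative examples (hostnames are made up):
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)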


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
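
# Illustrative example (OS and variant names are made up):
#   CalculateOSNames("debootstrap", ["minimal", "default"])
#     -> ["debootstrap+minimal", "debootstrap+default"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]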


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
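
# Illustrative example (field names are made up): a leading "+" extends the
# default field list instead of replacing it:
#   ParseFields(None, ["name", "status"]) -> ["name", "status"]
#   ParseFields("+disk_usage", ["name", "status"])
#     -> ["name", "status", "disk_usage"]
#   ParseFields("name,os", ["name", "status"]) -> ["name", "os"]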


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
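
# Usage note (illustrative): with the default choices, AskUser("Continue?")
# returns True for "y" and False for "n"; without a controlling tty it falls
# back to the last choice (False) without prompting.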


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
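
# Illustrative usage sketch: submit asynchronously, then poll for the result:
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)
# SubmitOpCode below wraps exactly this submit-and-poll sequence.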


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
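
# Usage note (illustrative): when "--submit" is given (opts.submit_only),
# SubmitOrSend raises JobSubmittedException carrying the job ID instead of
# waiting; GenericMain/FormatError turn that into a printed "JobID: ..." line
# and a zero exit code.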


def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail
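
# Illustrative example (values are hypothetical): for an invocation like
# "gnt-instance reboot --reason=maintenance inst1", op.reason ends up as
#   [(constants.OPCODE_REASON_SRC_USER, "maintenance", <ns timestamp>),
#    ("%s:gnt-instance" % constants.OPCODE_REASON_SRC_CLIENT, "reboot",
#     <ns timestamp>)]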


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)


def GetClient(query=False):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
  if override_socket:
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
      address = pathutils.MASTER_SOCKET
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
      address = pathutils.QUERY_SOCKET
    else:
      address = override_socket
  elif query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    address = None
  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
  return client
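
# Usage note (illustrative): GetClient(query=True) only connects to the query
# socket when the build enables split queries; otherwise it falls back to the
# masterd socket, and the LUXI override environment variable (if set) takes
# precedence over both.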


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")


def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
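
# Illustrative example (parameter values are made up): the option parser hands
# over (index, dict) pairs and missing indices become empty dicts, e.g.
#   ParseNicOption([("0", {"mode": "bridged"}), ("2", {"ip": "198.51.100.4"})])
#     -> [{"mode": "bridged"}, {}, {"ip": "198.51.100.4"}]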


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0
2714
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()


def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
                                       online_nodes).Call(fn, *args)


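# Illustrative usage sketch (editor's note, not part of the original code):
# RunWhileClusterStopped() passes the helper instance as the first argument
# to the callback, so the callback can run commands on individual nodes while
# every daemon is down. The node name and commands below are made-up examples.
#
#   def _OfflineCheck(helper):
#     helper._RunCmd(None, ["hostname"])               # locally on the master
#     helper._RunCmd("node2.example.com", ["uptime"])  # over SSH
#
#   RunWhileClusterStopped(ToStdout, _OfflineCheck)

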
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


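# Illustrative usage sketch (editor's note, not part of the original code):
# GenerateTable() returns the formatted lines instead of printing them; the
# caller emits them, e.g. via ToStdout(). The field names and row values
# below are made up for demonstration only.
#
#   headers = {"name": "Node", "dfree": "DFree"}
#   fields = ["name", "dfree"]
#   data = [["node1.example.com", 10240], ["node2.example.com", 2048]]
#   for line in GenerateTable(headers, fields, None, data,
#                             numfields=["dfree"], unitfields=["dfree"]):
#     ToStdout(line)
#
# With separator=None the "smart" width algorithm is used and "dfree" is
# right-aligned and unit-formatted; passing e.g. separator=":" would instead
# join the raw field values with ":".

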
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


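# Illustrative usage sketch (editor's note, not part of the original code):
# a format_override entry has the same shape as the values in
# _DEFAULT_FORMAT_QUERY, i.e. (formatting callable, align-right flag). The
# field name "pinst_cnt" below is only an example.
#
#   override = {
#     "pinst_cnt": (str, True),
#     }
#   (status, lines) = FormatQueryResult(response, unit="h", header=True,
#                                       format_override=override)
#   for line in lines:
#     ToStdout(line)
#
# status is one of QR_NORMAL, QR_INCOMPLETE or QR_UNKNOWN.

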
def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


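# Illustrative usage sketch (editor's note, not part of the original code):
# listing a resource through the generic helper; constants.QR_NODE and the
# field names "name", "offline" and "sip" are the same ones GetOnlineNodes()
# below queries, everything else is an arbitrary example.
#
#   GenericList(constants.QR_NODE, ["name", "offline", "sip"], [], None,
#               None, True, namefield="name")
#
# The return value is an exit code: constants.EXIT_UNKNOWN_FIELD if unknown
# fields were requested, constants.EXIT_SUCCESS otherwise.

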
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


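# Illustrative usage sketch (editor's note, not part of the original code):
# the same TableColumn/FormatTable combination GenericListFields() uses above,
# with made-up row data.
#
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True),
#     ]
#   rows = [["disk0", 1024], ["disk1", 20480]]
#   for line in FormatTable(rows, columns, True, None):
#     ToStdout(line)

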
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


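# Worked examples (editor's note, not part of the original code), following
# the suffix table in the docstring above:
#
#   ParseTimespec("30")  => 30                  (no suffix: seconds)
#   ParseTimespec("2h")  => 2 * 3600   = 7200
#   ParseTimespec("1w")  => 1 * 604800 = 604800
#   ParseTimespec("h")   => OpPrereqError (only a suffix was passed)

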
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


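# Illustrative usage sketch (editor's note, not part of the original code):
#
#   cl = GetClient()
#   # All online node names, without printing the "skipping offline" note:
#   names = GetOnlineNodes([], cl=cl, nowarn=True)
#   # Secondary IPs of all online nodes except the master, e.g. for moving
#   # data over the replication network:
#   sips = GetOnlineNodes([], cl=cl, secondary_ips=True, filter_master=True)

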
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


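# Illustrative usage sketch (editor's note, not part of the original code):
# queue one job per opcode set, submit them in a single batch and wait for
# all results; "opcode_list" stands for any list of (name, opcode) pairs
# built by the caller.
#
#   jex = JobExecutor(opts=opts)
#   for (name, op) in opcode_list:
#     jex.QueueJob(name, op)
#   jex.SubmitPending()
#   for (success, result) in jex.GetResults():
#     if not success:
#       ToStderr("Job failed: %s", result)

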
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


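# Illustrative example (editor's note, not part of the original code):
# parameters present in param_dict keep their own value, everything else is
# reported as coming from the defaults. The parameter names are arbitrary.
#
#   FormatParamsDictInfo({"maxmem": 512}, {"maxmem": 512, "vcpus": 1})
#   => {"maxmem": "512", "vcpus": "default (1)"}

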
def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX, {})
  ret = [
    (key,
     FormatParamsDictInfo(custom_minmax.get(key, {}),
                          eff_ipolicy[constants.ISPECS_MINMAX][key]))
    for key in constants.ISPECS_MINMAX_KEYS
    ]
  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("enabled disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def _PrintSpecsParameters(buf, specs):
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
  buf.write(",".join(values))


def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmax = ipolicy.get("minmax")
  if minmax:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)


def ConfirmOperation(names, list_type, text, extra=""):
3801
  """Ask the user to confirm an operation on a list of list_type.
3802

3803
  This function is used to request confirmation for doing an operation
3804
  on a given list of list_type.
3805

3806
  @type names: list
3807
  @param names: the list of names that we display when
3808
      we ask for confirmation
3809
  @type list_type: str
3810
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3811
  @type text: str
3812
  @param text: the operation that the user should confirm
3813
  @rtype: boolean
3814
  @return: True or False depending on user's confirmation.
3815

3816
  """
3817
  count = len(names)
3818
  msg = ("The %s will operate on %d %s.\n%s"
3819
         "Do you want to continue?" % (text, count, list_type, extra))
3820
  affected = (("\nAffected %s:\n" % list_type) +
3821
              "\n".join(["  %s" % name for name in names]))
3822

    
3823
  choices = [("y", True, "Yes, execute the %s" % text),
3824
             ("n", False, "No, abort the %s" % text)]
3825

    
3826
  if count > 20:
3827
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3828
    question = msg
3829
  else:
3830
    question = msg + affected
3831

    
3832
  choice = AskUser(question, choices)
3833
  if choice == "v":
3834
    choices.pop(1)
3835
    choice = AskUser(msg + affected, choices)
3836
  return choice
3837

    
3838

    
3839
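# Illustrative usage sketch (editor's note, not part of the original code):
#
#   names = ["instance1.example.com", "instance2.example.com"]
#   if not ConfirmOperation(names, "instances", "shutdown"):
#     return constants.EXIT_FAILURE
#
# For more than 20 names the user is first offered a "v" choice to view the
# affected list before answering.

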
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


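# Illustrative example (editor's note, not part of the original code),
# assuming utils.ParseUnit() returns mebibytes as usual:
#
#   _MaybeParseUnit({"min": "1G", "max": constants.VALUE_DEFAULT})
#   => {"min": 1024, "max": "default"}   (the "default" marker is kept as-is)

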
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  ipolicy[constants.ISPECS_MINMAX] = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      ipolicy[constants.ISPECS_MINMAX][key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, required):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret


def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  ret = None
  if minmax_ispecs and allowed_values and len(minmax_ispecs) == 1:
    for (key, spec) in minmax_ispecs.items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = {}
    for (key, spec) in minmax_ispecs.items():
      if key not in constants.ISPECS_MINMAX_KEYS:
        msg = "Invalid key in bounds instance specifications: %s" % key
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      minmax_out[key] = _ParseISpec(spec, key, True)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))