# lib/cli.py @ 66af5ec5

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)

__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  # FIXME: disable storage types once disk templates are fully implemented.
  "ENABLED_STORAGE_TYPES_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
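# Illustrative note (not part of the upstream module): each ARGS_* constant
# above is a list of _Argument instances whose min/max bound how many
# positional arguments a command accepts, e.g. ArgInstance(min=1, max=1)
# stands for "exactly one instance name"; the bounds show up in __repr__:
#   repr(ArgInstance(min=1, max=1))  => "<ArgInstance min=1 max=1>"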


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
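# Illustrative example (not part of the upstream module): how the two helpers
# above feed the tag commands below.  For a per-object tag type the object
# name is popped from the positional arguments, for the cluster it is None:
#   opts.tag_type = constants.TAG_NODE
#   args = ["node1.example.com", "mytag"]
#   _ExtractTagsObject(opts, args)  => (constants.TAG_NODE, "node1.example.com")
#   # args is now ["mytag"]; _ExtendTags() may then append further tags read
#   # from the file named in opts.tags_source ("-" meaning stdin), one per line.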


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
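# Illustrative example (not part of the upstream module): in addition to the
# "no_" prefix described in the docstring, a leading "-" (UN_PREFIX) strips
# the prefix and maps the key to None, and a bare key maps to True:
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus,acpi")
#   => {"memory": "512", "auto_balance": False, "vcpus": None, "acpi": True}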


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
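# Illustrative example (not part of the upstream module): this is the
# "identkeyval" type used by options such as --net, --disk and -H below.
# The part before ":" is an identifier and the rest goes through
# _SplitKeyVal; a "no_"/"-" prefix on the identifier (with no further
# options) requests removal/reset of the whole parameter group, while a
# leading "-" followed by a digit is kept as a plain identifier:
#   check_ident_key_val(None, "--net", "0:link=br0,mode=bridged")
#   => ("0", {"link": "br0", "mode": "bridged"})
#   check_ident_key_val(None, "--net", "no_0")  => ("0", False)
#   check_ident_key_val(None, "--net", "-0")    => ("-0", {})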


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
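# Illustrative examples (not part of the upstream module) for the simple
# converters above, assuming constants.VALUE_TRUE/VALUE_FALSE/VALUE_DEFAULT
# are the strings "true"/"false"/"default":
#   check_bool(None, "--offline", "Yes")             => True
#   check_list(None, "--fields", "")                 => []
#   check_list(None, "--fields", "name,status")      => ["name", "status"]
#   check_maybefloat(None, "--ratio", "2.5")         => 2.5
#   check_maybefloat(None, "--ratio", "default")     => "default"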


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
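# Illustrative example (hypothetical option, not part of the upstream module):
# the extra TYPES and the "completion_suggest" attribute registered in
# CliOption are what the real option definitions below rely on, e.g.:
#   EXAMPLE_OPT = cli_option("--example-params", dest="example_params",
#                            type="keyval", default={},
#                            help="Example key=value parameters",
#                            completion_suggest=OPT_COMPL_ONE_NODE)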


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                 type="keyval", default={},
                                 help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                 dest="ipolicy_vcpu_ratio",
                                 type="maybefloat", default=None,
                                 help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and "
                         " disrupt briefly the replication (like during the"
                         " migration")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

# FIXME: Remove once enabled disk templates are fully implemented.
ENABLED_STORAGE_TYPES_OPT = cli_option("--enabled-storage-types",
                                       dest="enabled_storage_types",
                                       help="Comma-separated list of "
                                            "storage methods",
                                       type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)
1182

    
1183
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1184
                            type="keyval", default={},
1185
                            help="NIC parameters")
1186

    
1187
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1188
                         dest="candidate_pool_size", type="int",
1189
                         help="Set the candidate pool size")
1190

    
1191
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1192
                         help=("Enables LVM and specifies the volume group"
1193
                               " name (cluster-wide) for disk allocation"
1194
                               " [%s]" % constants.DEFAULT_VG),
1195
                         metavar="VG", default=None)
1196

    
1197
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1198
                          help="Destroy cluster", action="store_true")
1199

    
1200
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1201
                          help="Skip node agreement check (dangerous)",
1202
                          action="store_true", default=False)
1203

    
1204
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1205
                            help="Specify the mac prefix for the instance IP"
1206
                            " addresses, in the format XX:XX:XX",
1207
                            metavar="PREFIX",
1208
                            default=None)
1209

    
1210
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1211
                               help="Specify the node interface (cluster-wide)"
1212
                               " on which the master IP address will be added"
1213
                               " (cluster init default: %s)" %
1214
                               constants.DEFAULT_BRIDGE,
1215
                               metavar="NETDEV",
1216
                               default=None)
1217

    
1218
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1219
                                help="Specify the netmask of the master IP",
1220
                                metavar="NETMASK",
1221
                                default=None)
1222

    
1223
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1224
                                     dest="use_external_mip_script",
1225
                                     help="Specify whether to run a"
1226
                                     " user-provided script for the master"
1227
                                     " IP address turnup and"
1228
                                     " turndown operations",
1229
                                     type="bool", metavar=_YORNO, default=None)
1230

    
1231
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1232
                                help="Specify the default directory (cluster-"
1233
                                "wide) for storing the file-based disks [%s]" %
1234
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1235
                                metavar="DIR",
1236
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1237

    
1238
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1239
  "--shared-file-storage-dir",
1240
  dest="shared_file_storage_dir",
1241
  help="Specify the default directory (cluster-wide) for storing the"
1242
  " shared file-based disks [%s]" %
1243
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1244
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1245

    
1246
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1247
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1248
                                   action="store_false", default=True)
1249

    
1250
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1251
                                    help="Don't initialize SSH keys",
1252
                                    action="store_false", default=True)
1253

    
1254
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1255
                             help="Enable parseable error messages",
1256
                             action="store_true", default=False)
1257

    
1258
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1259
                          help="Skip N+1 memory redundancy tests",
1260
                          action="store_true", default=False)
1261

    
1262
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1263
                             help="Type of reboot: soft/hard/full",
1264
                             default=constants.INSTANCE_REBOOT_HARD,
1265
                             metavar="<REBOOT>",
1266
                             choices=list(constants.REBOOT_TYPES))
1267

    
1268
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1269
                                    dest="ignore_secondaries",
1270
                                    default=False, action="store_true",
1271
                                    help="Ignore errors from secondaries")
1272

    
1273
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1274
                            action="store_false", default=True,
1275
                            help="Don't shutdown the instance (unsafe)")
1276

    
1277
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1278
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1279
                         help="Maximum time to wait")
1280

    
1281
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1282
                                  dest="shutdown_timeout", type="int",
1283
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1284
                                  help="Maximum time to wait for instance"
1285
                                  " shutdown")
1286

    
1287
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1288
                          default=None,
1289
                          help=("Number of seconds between repetions of the"
1290
                                " command"))
1291

    
1292
EARLY_RELEASE_OPT = cli_option("--early-release",
1293
                               dest="early_release", default=False,
1294
                               action="store_true",
1295
                               help="Release the locks on the secondary"
1296
                               " node(s) early")
1297

    
1298
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1299
                                  dest="new_cluster_cert",
1300
                                  default=False, action="store_true",
1301
                                  help="Generate a new cluster certificate")
1302

    
1303
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1304
                           default=None,
1305
                           help="File containing new RAPI certificate")
1306

    
1307
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1308
                               default=None, action="store_true",
1309
                               help=("Generate a new self-signed RAPI"
1310
                                     " certificate"))
1311

    
1312
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1313
                            default=None,
1314
                            help="File containing new SPICE certificate")
1315

    
1316
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1317
                              default=None,
1318
                              help="File containing the certificate of the CA"
1319
                              " which signed the SPICE certificate")
1320

    
1321
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1322
                                dest="new_spice_cert", default=None,
1323
                                action="store_true",
1324
                                help=("Generate a new self-signed SPICE"
1325
                                      " certificate"))
1326

    
1327
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1328
                                    dest="new_confd_hmac_key",
1329
                                    default=False, action="store_true",
1330
                                    help=("Create a new HMAC key for %s" %
1331
                                          constants.CONFD))
1332

    
1333
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1334
                                       dest="cluster_domain_secret",
1335
                                       default=None,
1336
                                       help=("Load new new cluster domain"
1337
                                             " secret from file"))
1338

    
1339
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1340
                                           dest="new_cluster_domain_secret",
1341
                                           default=False, action="store_true",
1342
                                           help=("Create a new cluster domain"
1343
                                                 " secret"))
1344

    
1345
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1346
                              dest="use_replication_network",
1347
                              help="Whether to use the replication network"
1348
                              " for talking to the nodes",
1349
                              action="store_true", default=False)
1350

    
1351
MAINTAIN_NODE_HEALTH_OPT = \
1352
    cli_option("--maintain-node-health", dest="maintain_node_health",
1353
               metavar=_YORNO, default=None, type="bool",
1354
               help="Configure the cluster to automatically maintain node"
1355
               " health, by shutting down unknown instances, shutting down"
1356
               " unknown DRBD devices, etc.")
1357

    
1358
IDENTIFY_DEFAULTS_OPT = \
1359
    cli_option("--identify-defaults", dest="identify_defaults",
1360
               default=False, action="store_true",
1361
               help="Identify which saved instance parameters are equal to"
1362
               " the current cluster defaults and set them as such, instead"
1363
               " of marking them as overridden")
1364

    
1365
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1366
                         action="store", dest="uid_pool",
1367
                         help=("A list of user-ids or user-id"
1368
                               " ranges separated by commas"))
1369

    
1370
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1371
                          action="store", dest="add_uids",
1372
                          help=("A list of user-ids or user-id"
1373
                                " ranges separated by commas, to be"
1374
                                " added to the user-id pool"))
1375

    
1376
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1377
                             action="store", dest="remove_uids",
1378
                             help=("A list of user-ids or user-id"
1379
                                   " ranges separated by commas, to be"
1380
                                   " removed from the user-id pool"))
1381

    
1382
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1383
                              action="store", dest="reserved_lvs",
1384
                              help=("A comma-separated list of reserved"
1385
                                    " logical volumes names, that will be"
1386
                                    " ignored by cluster verify"))
1387

    
1388
ROMAN_OPT = cli_option("--roman",
1389
                       dest="roman_integers", default=False,
1390
                       action="store_true",
1391
                       help="Use roman numbers for positive integers")
1392

    
1393
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1394
                             action="store", default=None,
1395
                             help="Specifies usermode helper for DRBD")
1396

    
1397
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1398
                                action="store_false", default=True,
1399
                                help="Disable support for DRBD")
1400

    
1401
PRIMARY_IP_VERSION_OPT = \
1402
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1403
               action="store", dest="primary_ip_version",
1404
               metavar="%d|%d" % (constants.IP4_VERSION,
1405
                                  constants.IP6_VERSION),
1406
               help="Cluster-wide IP version for primary IP")
1407

    
1408
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1409
                              action="store_true",
1410
                              help="Show machine name for every line in output")
1411

    
1412
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1413
                              action="store_true",
1414
                              help=("Hide successful results and show failures"
1415
                                    " only (determined by the exit code)"))
1416

    
1417
REASON_OPT = cli_option("--reason", default=None,
1418
                        help="The reason for executing a VM-state-changing"
1419
                             " operation")
1420

    
1421

    
1422
def _PriorityOptionCb(option, _, value, parser):
1423
  """Callback for processing C{--priority} option.
1424

1425
  """
1426
  value = _PRIONAME_TO_VALUE[value]
1427

    
1428
  setattr(parser.values, option.dest, value)
1429

    
1430

    
1431
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1432
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1433
                          choices=_PRIONAME_TO_VALUE.keys(),
1434
                          action="callback", type="choice",
1435
                          callback=_PriorityOptionCb,
1436
                          help="Priority for opcode processing")
1437

    
1438
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1439
                        type="bool", default=None, metavar=_YORNO,
1440
                        help="Sets the hidden flag on the OS")
1441

    
1442
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1443
                        type="bool", default=None, metavar=_YORNO,
1444
                        help="Sets the blacklisted flag on the OS")
1445

    
1446
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1447
                                     type="bool", metavar=_YORNO,
1448
                                     dest="prealloc_wipe_disks",
1449
                                     help=("Wipe disks prior to instance"
1450
                                           " creation"))
1451

    
1452
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1453
                             type="keyval", default=None,
1454
                             help="Node parameters")
1455

    
1456
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1457
                              action="store", metavar="POLICY", default=None,
1458
                              help="Allocation policy for the node group")
1459

    
1460
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1461
                              type="bool", metavar=_YORNO,
1462
                              dest="node_powered",
1463
                              help="Specify if the SoR for node is powered")
1464

    
1465
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1466
                             default=constants.OOB_TIMEOUT,
1467
                             help="Maximum time to wait for out-of-band helper")
1468

    
1469
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1470
                             default=constants.OOB_POWER_DELAY,
1471
                             help="Time in seconds to wait between power-ons")
1472

    
1473
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1474
                              action="store_true", default=False,
1475
                              help=("Whether command argument should be treated"
1476
                                    " as filter"))
1477

    
1478
NO_REMEMBER_OPT = cli_option("--no-remember",
1479
                             dest="no_remember",
1480
                             action="store_true", default=False,
1481
                             help="Perform but do not record the change"
1482
                             " in the configuration")
1483

    
1484
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1485
                              default=False, action="store_true",
1486
                              help="Evacuate primary instances only")
1487

    
1488
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1489
                                default=False, action="store_true",
1490
                                help="Evacuate secondary instances only"
1491
                                     " (applies only to internally mirrored"
1492
                                     " disk templates, e.g. %s)" %
1493
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1494

    
1495
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1496
                                action="store_true", default=False,
1497
                                help="Pause instance at startup")
1498

    
1499
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1500
                          help="Destination node group (name or uuid)",
1501
                          default=None, action="append",
1502
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1503

    
1504
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1505
                               action="append", dest="ignore_errors",
1506
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1507
                               help="Error code to be ignored")
1508

    
1509
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1510
                            action="append",
1511
                            help=("Specify disk state information in the"
1512
                                  " format"
1513
                                  " storage_type/identifier:option=value,...;"
1514
                                  " note this is unused for now"),
1515
                            type="identkeyval")
1516

    
1517
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1518
                          action="append",
1519
                          help=("Specify hypervisor state information in the"
1520
                                " format hypervisor:option=value,...;"
1521
                                " note this is unused for now"),
1522
                          type="identkeyval")
1523

    
1524
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1525
                                action="store_true", default=False,
1526
                                help="Ignore instance policy violations")
1527

    
1528
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1529
                             help="Sets the instance's runtime memory,"
1530
                             " ballooning it up or down to the new value",
1531
                             default=None, type="unit", metavar="<size>")
1532

    
1533
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1534
                          action="store_true", default=False,
1535
                          help="Marks the grow as absolute instead of the"
1536
                          " (default) relative mode")
1537

    
1538
NETWORK_OPT = cli_option("--network",
1539
                         action="store", default=None, dest="network",
1540
                         help="IP network in CIDR notation")
1541

    
1542
GATEWAY_OPT = cli_option("--gateway",
1543
                         action="store", default=None, dest="gateway",
1544
                         help="IP address of the router (gateway)")
1545

    
1546
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1547
                                  action="store", default=None,
1548
                                  dest="add_reserved_ips",
1549
                                  help="Comma-separated list of"
1550
                                  " reserved IPs to add")
1551

    
1552
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1553
                                     action="store", default=None,
1554
                                     dest="remove_reserved_ips",
1555
                                     help="Comma-delimited list of"
1556
                                     " reserved IPs to remove")
1557

    
1558
NETWORK6_OPT = cli_option("--network6",
1559
                          action="store", default=None, dest="network6",
1560
                          help="IP network in CIDR notation")
1561

    
1562
GATEWAY6_OPT = cli_option("--gateway6",
1563
                          action="store", default=None, dest="gateway6",
1564
                          help="IP6 address of the router (gateway)")
1565

    
1566
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1567
                                  dest="conflicts_check",
1568
                                  default=True,
1569
                                  action="store_false",
1570
                                  help="Don't check for conflicting IPs")
1571

    
1572
#: Options provided by all commands
1573
COMMON_OPTS = [DEBUG_OPT]
1574

    
1575
# Common options for creating instances. "add" and "import" then add their
# own specific ones.
1577
COMMON_CREATE_OPTS = [
1578
  BACKEND_OPT,
1579
  DISK_OPT,
1580
  DISK_TEMPLATE_OPT,
1581
  FILESTORE_DIR_OPT,
1582
  FILESTORE_DRIVER_OPT,
1583
  HYPERVISOR_OPT,
1584
  IALLOCATOR_OPT,
1585
  NET_OPT,
1586
  NODE_PLACEMENT_OPT,
1587
  NOIPCHECK_OPT,
1588
  NOCONFLICTSCHECK_OPT,
1589
  NONAMECHECK_OPT,
1590
  NONICS_OPT,
1591
  NWSYNC_OPT,
1592
  OSPARAMS_OPT,
1593
  OS_SIZE_OPT,
1594
  SUBMIT_OPT,
1595
  TAG_ADD_OPT,
1596
  DRY_RUN_OPT,
1597
  PRIORITY_OPT,
1598
  ]
1599

    
1600
# common instance policy options
1601
INSTANCE_POLICY_OPTS = [
1602
  SPECS_CPU_COUNT_OPT,
1603
  SPECS_DISK_COUNT_OPT,
1604
  SPECS_DISK_SIZE_OPT,
1605
  SPECS_MEM_SIZE_OPT,
1606
  SPECS_NIC_COUNT_OPT,
1607
  IPOLICY_DISK_TEMPLATES,
1608
  IPOLICY_VCPU_RATIO,
1609
  IPOLICY_SPINDLE_RATIO,
1610
  ]
1611

    
1612

    
1613
class _ShowUsage(Exception):
1614
  """Exception class for L{_ParseArgs}.
1615

1616
  """
1617
  def __init__(self, exit_error):
1618
    """Initializes instances of this class.
1619

1620
    @type exit_error: bool
1621
    @param exit_error: Whether to report failure on exit
1622

1623
    """
1624
    Exception.__init__(self)
1625
    self.exit_error = exit_error
1626

    
1627

    
1628
class _ShowVersion(Exception):
1629
  """Exception class for L{_ParseArgs}.
1630

1631
  """
1632

    
1633

    
1634
def _ParseArgs(binary, argv, commands, aliases, env_override):
1635
  """Parser for the command line arguments.
1636

1637
  This function parses the arguments and returns the function which
1638
  must be executed together with its (modified) arguments.
1639

1640
  @param binary: Script name
1641
  @param argv: Command line arguments
1642
  @param commands: Dictionary containing command definitions
1643
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1644
  @param env_override: list of env variables allowed for default args
1645
  @raise _ShowUsage: If usage description should be shown
1646
  @raise _ShowVersion: If version should be shown
1647

1648
  """
1649
  assert not (env_override - set(commands))
1650
  assert not (set(aliases.keys()) & set(commands.keys()))
1651

    
1652
  if len(argv) > 1:
1653
    cmd = argv[1]
1654
  else:
1655
    # No option or command given
1656
    raise _ShowUsage(exit_error=True)
1657

    
1658
  if cmd == "--version":
1659
    raise _ShowVersion()
1660
  elif cmd == "--help":
1661
    raise _ShowUsage(exit_error=False)
1662
  elif not (cmd in commands or cmd in aliases):
1663
    raise _ShowUsage(exit_error=True)
1664

    
1665
  # get command, unalias it, and look it up in commands
1666
  if cmd in aliases:
1667
    if aliases[cmd] not in commands:
1668
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1669
                                   " command '%s'" % (cmd, aliases[cmd]))
1670

    
1671
    cmd = aliases[cmd]
1672

    
1673
  if cmd in env_override:
1674
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1675
    env_args = os.environ.get(args_env_name)
1676
    if env_args:
1677
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1678

    
1679
  func, args_def, parser_opts, usage, description = commands[cmd]
1680
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1681
                        description=description,
1682
                        formatter=TitledHelpFormatter(),
1683
                        usage="%%prog %s %s" % (cmd, usage))
1684
  parser.disable_interspersed_args()
1685
  options, args = parser.parse_args(args=argv[2:])
1686

    
1687
  if not _CheckArguments(cmd, args_def, args):
1688
    return None, None, None
1689

    
1690
  return func, options, args
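
# Illustrative note (not part of the original module): for commands listed in
# env_override, default arguments are taken from an environment variable named
# after the binary and the command, upper-cased and with dashes turned into
# underscores.  For example (names below are purely illustrative):
#
#   binary == "gnt-instance", cmd == "list"
#     -> args_env_name == "GNT_INSTANCE_LIST"
#
# so running "GNT_INSTANCE_LIST='-o name' gnt-instance list" behaves as if
# "-o name" had been given right after the command.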
1691

    
1692

    
1693
def _FormatUsage(binary, commands):
1694
  """Generates a nice description of all commands.
1695

1696
  @param binary: Script name
1697
  @param commands: Dictionary containing command definitions
1698

1699
  """
1700
  # compute the max line length for cmd + usage
1701
  mlen = min(60, max(map(len, commands)))
1702

    
1703
  yield "Usage: %s {command} [options...] [argument...]" % binary
1704
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1705
  yield ""
1706
  yield "Commands:"
1707

    
1708
  # and format a nice command list
1709
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1710
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1711
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1712
    for line in help_lines:
1713
      yield " %-*s   %s" % (mlen, "", line)
1714

    
1715
  yield ""
1716

    
1717

    
1718
def _CheckArguments(cmd, args_def, args):
1719
  """Verifies the arguments using the argument definition.
1720

1721
  Algorithm:
1722

1723
    1. Abort with error if values specified by user but none expected.
1724

1725
    1. For each argument in definition
1726

1727
      1. Keep running count of minimum number of values (min_count)
1728
      1. Keep running count of maximum number of values (max_count)
1729
      1. If it has an unlimited number of values
1730

1731
        1. Abort with error if it's not the last argument in the definition
1732

1733
    1. If last argument has limited number of values
1734

1735
      1. Abort with error if number of values doesn't match or is too large
1736

1737
    1. Abort with error if user didn't pass enough values (min_count)
1738

1739
  """
1740
  if args and not args_def:
1741
    ToStderr("Error: Command %s expects no arguments", cmd)
1742
    return False
1743

    
1744
  min_count = None
1745
  max_count = None
1746
  check_max = None
1747

    
1748
  last_idx = len(args_def) - 1
1749

    
1750
  for idx, arg in enumerate(args_def):
1751
    if min_count is None:
1752
      min_count = arg.min
1753
    elif arg.min is not None:
1754
      min_count += arg.min
1755

    
1756
    if max_count is None:
1757
      max_count = arg.max
1758
    elif arg.max is not None:
1759
      max_count += arg.max
1760

    
1761
    if idx == last_idx:
1762
      check_max = (arg.max is not None)
1763

    
1764
    elif arg.max is None:
1765
      raise errors.ProgrammerError("Only the last argument can have max=None")
1766

    
1767
  if check_max:
1768
    # Command with exact number of arguments
1769
    if (min_count is not None and max_count is not None and
1770
        min_count == max_count and len(args) != min_count):
1771
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1772
      return False
1773

    
1774
    # Command with limited number of arguments
1775
    if max_count is not None and len(args) > max_count:
1776
      ToStderr("Error: Command %s expects only %d argument(s)",
1777
               cmd, max_count)
1778
      return False
1779

    
1780
  # Command with some required arguments
1781
  if min_count is not None and len(args) < min_count:
1782
    ToStderr("Error: Command %s expects at least %d argument(s)",
1783
             cmd, min_count)
1784
    return False
1785

    
1786
  return True


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
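
# Illustrative sketch (not part of the original module): SplitNodeOption
# splits only on the first colon, so a value of the form "pnode:snode" yields
# both nodes, while a plain node name leaves the secondary as None:
#
#   >>> SplitNodeOption("node1.example.com:node2.example.com")
#   ['node1.example.com', 'node2.example.com']
#   >>> SplitNodeOption("node1.example.com")
#   ('node1.example.com', None)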


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
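
# Illustrative sketch (not part of the original module): variants are appended
# to the base OS name with a "+" separator (OS names below are just an
# example):
#
#   >>> CalculateOSNames("debootstrap", ["default", "minimal"])
#   ['debootstrap+default', 'debootstrap+minimal']
#   >>> CalculateOSNames("debootstrap", None)
#   ['debootstrap']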


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
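
# Illustrative sketch (not part of the original module): a leading "+" extends
# the default field list instead of replacing it (field names below are just
# an example):
#
#   >>> ParseFields(None, ["name", "status"])
#   ['name', 'status']
#   >>> ParseFields("+oper_ram", ["name", "status"])
#   ['name', 'status', 'oper_ram']
#   >>> ParseFields("name,ip", ["name", "status"])
#   ['name', 'ip']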
1832

    
1833

    
1834
UsesRPC = rpc.RunWithRPC
1835

    
1836

    
1837
def AskUser(text, choices=None):
1838
  """Ask the user a question.
1839

1840
  @param text: the question to ask
1841

1842
  @param choices: list of tuples (input_char, return_value,
1843
      description); if not given, it will default to: [('y', True,
1844
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
1845
      note that the '?' char is reserved for help
1846

1847
  @return: one of the return values from the choices list; if input is
1848
      not possible (i.e. not running with a tty), we return the last
      entry from the list
1850

1851
  """
1852
  if choices is None:
1853
    choices = [("y", True, "Perform the operation"),
1854
               ("n", False, "Do not perform the operation")]
1855
  if not choices or not isinstance(choices, list):
1856
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1857
  for entry in choices:
1858
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1859
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1860

    
1861
  answer = choices[-1][1]
1862
  new_text = []
1863
  for line in text.splitlines():
1864
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1865
  text = "\n".join(new_text)
1866
  try:
1867
    f = file("/dev/tty", "a+")
1868
  except IOError:
1869
    return answer
1870
  try:
1871
    chars = [entry[0] for entry in choices]
1872
    chars[-1] = "[%s]" % chars[-1]
1873
    chars.append("?")
1874
    maps = dict([(entry[0], entry[1]) for entry in choices])
1875
    while True:
1876
      f.write(text)
1877
      f.write("\n")
1878
      f.write("/".join(chars))
1879
      f.write(": ")
1880
      line = f.readline(2).strip().lower()
1881
      if line in maps:
1882
        answer = maps[line]
1883
        break
1884
      elif line == "?":
1885
        for entry in choices:
1886
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1887
        f.write("\n")
1888
        continue
1889
  finally:
1890
    f.close()
1891
  return answer
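
# Illustrative sketch (not part of the original module): with the default
# choices this is a plain yes/no question; custom choices follow the
# (input_char, return_value, description) format described above.  Without a
# controlling tty the last (default) entry is returned unchanged:
#
#   proceed = AskUser("Continue with the operation?")
#   choice = AskUser("How should we proceed?",
#                    [("c", "continue", "Continue the operation"),
#                     ("a", "abort", "Abort the operation")])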
1892

    
1893

    
1894
class JobSubmittedException(Exception):
1895
  """Job was submitted, client should exit.
1896

1897
  This exception has one argument, the ID of the job that was
1898
  submitted. The handler should print this ID.
1899

1900
  This is not an error, just a structured way to exit from clients.
1901

1902
  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id
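
# Illustrative sketch (not part of the original module): SendJob only queues
# the opcodes and returns the job ID; callers that need the results typically
# combine it with PollJob (defined further below), which is essentially what
# SubmitOpCode does:
#
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)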
1921

    
1922

    
1923
def GenericPollJob(job_id, cbs, report_cbs):
1924
  """Generic job-polling function.
1925

1926
  @type job_id: number
1927
  @param job_id: Job ID
1928
  @type cbs: Instance of L{JobPollCbBase}
1929
  @param cbs: Data callbacks
1930
  @type report_cbs: Instance of L{JobPollReportCbBase}
1931
  @param report_cbs: Reporting callbacks
1932

1933
  """
1934
  prev_job_info = None
1935
  prev_logmsg_serial = None
1936

    
1937
  status = None
1938

    
1939
  while True:
1940
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1941
                                      prev_logmsg_serial)
1942
    if not result:
1943
      # job not found, go away!
1944
      raise errors.JobLost("Job with id %s lost" % job_id)
1945

    
1946
    if result == constants.JOB_NOTCHANGED:
1947
      report_cbs.ReportNotChanged(job_id, status)
1948

    
1949
      # Wait again
1950
      continue
1951

    
1952
    # Split result, a tuple of (field values, log entries)
1953
    (job_info, log_entries) = result
1954
    (status, ) = job_info
1955

    
1956
    if log_entries:
1957
      for log_entry in log_entries:
1958
        (serial, timestamp, log_type, message) = log_entry
1959
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1960
                                    log_type, message)
1961
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1962

    
1963
    # TODO: Handle canceled and archived jobs
1964
    elif status in (constants.JOB_STATUS_SUCCESS,
1965
                    constants.JOB_STATUS_ERROR,
1966
                    constants.JOB_STATUS_CANCELING,
1967
                    constants.JOB_STATUS_CANCELED):
1968
      break
1969

    
1970
    prev_job_info = job_info
1971

    
1972
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1973
  if not jobs:
1974
    raise errors.JobLost("Job with id %s lost" % job_id)
1975

    
1976
  status, opstatus, result = jobs[0]
1977

    
1978
  if status == constants.JOB_STATUS_SUCCESS:
1979
    return result
1980

    
1981
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1982
    raise errors.OpExecError("Job was canceled")
1983

    
1984
  has_ok = False
1985
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1986
    if status == constants.OP_STATUS_SUCCESS:
1987
      has_ok = True
1988
    elif status == constants.OP_STATUS_ERROR:
1989
      errors.MaybeRaise(msg)
1990

    
1991
      if has_ok:
1992
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1993
                                 (idx, msg))
1994

    
1995
      raise errors.OpExecError(str(msg))
1996

    
1997
  # default failure mode
1998
  raise errors.OpExecError(result)
1999

    
2000

    
2001
class JobPollCbBase:
2002
  """Base class for L{GenericPollJob} callbacks.
2003

2004
  """
2005
  def __init__(self):
2006
    """Initializes this class.
2007

2008
    """
2009

    
2010
  def WaitForJobChangeOnce(self, job_id, fields,
2011
                           prev_job_info, prev_log_serial):
2012
    """Waits for changes on a job.
2013

2014
    """
2015
    raise NotImplementedError()
2016

    
2017
  def QueryJobs(self, job_ids, fields):
2018
    """Returns the selected fields for the selected job IDs.
2019

2020
    @type job_ids: list of numbers
2021
    @param job_ids: Job IDs
2022
    @type fields: list of strings
2023
    @param fields: Fields
2024

2025
    """
2026
    raise NotImplementedError()
2027

    
2028

    
2029
class JobPollReportCbBase:
2030
  """Base class for L{GenericPollJob} reporting callbacks.
2031

2032
  """
2033
  def __init__(self):
2034
    """Initializes this class.
2035

2036
    """
2037

    
2038
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2039
    """Handles a log message.
2040

2041
    """
2042
    raise NotImplementedError()
2043

    
2044
  def ReportNotChanged(self, job_id, status):
2045
    """Called for if a job hasn't changed in a while.
2046

2047
    @type job_id: number
2048
    @param job_id: Job ID
2049
    @type status: string or None
2050
    @param status: Job status if available
2051

2052
    """
2053
    raise NotImplementedError()
2054

    
2055

    
2056
class _LuxiJobPollCb(JobPollCbBase):
2057
  def __init__(self, cl):
2058
    """Initializes this class.
2059

2060
    """
2061
    JobPollCbBase.__init__(self)
2062
    self.cl = cl
2063

    
2064
  def WaitForJobChangeOnce(self, job_id, fields,
2065
                           prev_job_info, prev_log_serial):
2066
    """Waits for changes on a job.
2067

2068
    """
2069
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2070
                                        prev_job_info, prev_log_serial)
2071

    
2072
  def QueryJobs(self, job_ids, fields):
2073
    """Returns the selected fields for the selected job IDs.
2074

2075
    """
2076
    return self.cl.QueryJobs(job_ids, fields)
2077

    
2078

    
2079
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2080
  def __init__(self, feedback_fn):
2081
    """Initializes this class.
2082

2083
    """
2084
    JobPollReportCbBase.__init__(self)
2085

    
2086
    self.feedback_fn = feedback_fn
2087

    
2088
    assert callable(feedback_fn)
2089

    
2090
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2091
    """Handles a log message.
2092

2093
    """
2094
    self.feedback_fn((timestamp, log_type, log_msg))
2095

    
2096
  def ReportNotChanged(self, job_id, status):
2097
    """Called if a job hasn't changed in a while.
2098

2099
    """
2100
    # Ignore
2101

    
2102

    
2103
class StdioJobPollReportCb(JobPollReportCbBase):
2104
  def __init__(self):
2105
    """Initializes this class.
2106

2107
    """
2108
    JobPollReportCbBase.__init__(self)
2109

    
2110
    self.notified_queued = False
2111
    self.notified_waitlock = False
2112

    
2113
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2114
    """Handles a log message.
2115

2116
    """
2117
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2118
             FormatLogMessage(log_type, log_msg))
2119

    
2120
  def ReportNotChanged(self, job_id, status):
2121
    """Called if a job hasn't changed in a while.
2122

2123
    """
2124
    if status is None:
2125
      return
2126

    
2127
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2128
      ToStderr("Job %s is waiting in queue", job_id)
2129
      self.notified_queued = True
2130

    
2131
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2132
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2133
      self.notified_waitlock = True
2134

    
2135

    
2136
def FormatLogMessage(log_type, log_msg):
2137
  """Formats a job message according to its type.
2138

2139
  """
2140
  if log_type != constants.ELOG_MESSAGE:
2141
    log_msg = str(log_msg)
2142

    
2143
  return utils.SafeEncode(log_msg)
2144

    
2145

    
2146
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2147
  """Function to poll for the result of a job.
2148

2149
  @type job_id: job identifier
2150
  @param job_id: the job to poll for results
2151
  @type cl: luxi.Client
2152
  @param cl: the luxi client to use for communicating with the master;
2153
             if None, a new client will be created
2154

2155
  """
2156
  if cl is None:
2157
    cl = GetClient()
2158

    
2159
  if reporter is None:
2160
    if feedback_fn:
2161
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2162
    else:
2163
      reporter = StdioJobPollReportCb()
2164
  elif feedback_fn:
2165
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2166

    
2167
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2168

    
2169

    
2170
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2171
  """Legacy function to submit an opcode.
2172

2173
  This is just a simple wrapper over the construction of the processor
2174
  instance. It should be extended to better handle feedback and
2175
  interaction functions.
2176

2177
  """
2178
  if cl is None:
2179
    cl = GetClient()
2180

    
2181
  SetGenericOpcodeOpts([op], opts)
2182

    
2183
  job_id = SendJob([op], cl=cl)
2184

    
2185
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2186
                       reporter=reporter)
2187

    
2188
  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
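
# Illustrative sketch (not part of the original module): command
# implementations usually let the JobSubmittedException escape, so that
# GenericMain (below) can print the job ID when --submit was given:
#
#   def DoSomething(opts, args):
#     op = ...                  # build the desired opcode here
#     result = SubmitOrSend(op, opts)
#     # only reached when the job was actually waited for
#     return 0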


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
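
# Illustrative sketch (not part of the original module): the opcodes are
# modified in place, so the usual calling pattern is simply
#
#   SetGenericOpcodeOpts([op], opts)
#   job_id = SendJob([op], cl=cl)
#
# which is what SubmitOpCode and SubmitOrSend above already do for you.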
2231

    
2232

    
2233
def GetClient(query=False):
2234
  """Connects to the a luxi socket and returns a client.
2235

2236
  @type query: boolean
2237
  @param query: this signifies that the client will only be
2238
      used for queries; if the build-time parameter
2239
      enable-split-queries is enabled, then the client will be
2240
      connected to the query socket instead of the masterd socket
2241

2242
  """
2243
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2244
  if override_socket:
2245
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2246
      address = pathutils.MASTER_SOCKET
2247
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2248
      address = pathutils.QUERY_SOCKET
2249
    else:
2250
      address = override_socket
2251
  elif query and constants.ENABLE_SPLIT_QUERY:
2252
    address = pathutils.QUERY_SOCKET
2253
  else:
2254
    address = None
2255
  # TODO: Cache object?
2256
  try:
2257
    client = luxi.Client(address=address)
2258
  except luxi.NoMasterError:
2259
    ss = ssconf.SimpleStore()
2260

    
2261
    # Try to read ssconf file
2262
    try:
2263
      ss.GetMasterNode()
2264
    except errors.ConfigurationError:
2265
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2266
                                 " not part of a cluster",
2267
                                 errors.ECODE_INVAL)
2268

    
2269
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2270
    if master != myself:
2271
      raise errors.OpPrereqError("This is not the master node, please connect"
2272
                                 " to node '%s' and rerun the command" %
2273
                                 master, errors.ECODE_INVAL)
2274
    raise
2275
  return client
2276

    
2277

    
2278
def FormatError(err):
2279
  """Return a formatted error message for a given error.
2280

2281
  This function takes an exception instance and returns a tuple
2282
  consisting of two values: first, the recommended exit code, and
2283
  second, a string describing the error message (not
2284
  newline-terminated).
2285

2286
  """
2287
  retcode = 1
2288
  obuf = StringIO()
2289
  msg = str(err)
2290
  if isinstance(err, errors.ConfigurationError):
2291
    txt = "Corrupt configuration file: %s" % msg
2292
    logging.error(txt)
2293
    obuf.write(txt + "\n")
2294
    obuf.write("Aborting.")
2295
    retcode = 2
2296
  elif isinstance(err, errors.HooksAbort):
2297
    obuf.write("Failure: hooks execution failed:\n")
2298
    for node, script, out in err.args[0]:
2299
      if out:
2300
        obuf.write("  node: %s, script: %s, output: %s\n" %
2301
                   (node, script, out))
2302
      else:
2303
        obuf.write("  node: %s, script: %s (no output)\n" %
2304
                   (node, script))
2305
  elif isinstance(err, errors.HooksFailure):
2306
    obuf.write("Failure: hooks general failure: %s" % msg)
2307
  elif isinstance(err, errors.ResolverError):
2308
    this_host = netutils.Hostname.GetSysName()
2309
    if err.args[0] == this_host:
2310
      msg = "Failure: can't resolve my own hostname ('%s')"
2311
    else:
2312
      msg = "Failure: can't resolve hostname '%s'"
2313
    obuf.write(msg % err.args[0])
2314
  elif isinstance(err, errors.OpPrereqError):
2315
    if len(err.args) == 2:
2316
      obuf.write("Failure: prerequisites not met for this"
2317
                 " operation:\nerror type: %s, error details:\n%s" %
2318
                 (err.args[1], err.args[0]))
2319
    else:
2320
      obuf.write("Failure: prerequisites not met for this"
2321
                 " operation:\n%s" % msg)
2322
  elif isinstance(err, errors.OpExecError):
2323
    obuf.write("Failure: command execution error:\n%s" % msg)
2324
  elif isinstance(err, errors.TagError):
2325
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2326
  elif isinstance(err, errors.JobQueueDrainError):
2327
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2328
               " accept new requests\n")
2329
  elif isinstance(err, errors.JobQueueFull):
2330
    obuf.write("Failure: the job queue is full and doesn't accept new"
2331
               " job submissions until old jobs are archived\n")
2332
  elif isinstance(err, errors.TypeEnforcementError):
2333
    obuf.write("Parameter Error: %s" % msg)
2334
  elif isinstance(err, errors.ParameterError):
2335
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2336
  elif isinstance(err, luxi.NoMasterError):
2337
    if err.args[0] == pathutils.MASTER_SOCKET:
2338
      daemon = "the master daemon"
2339
    elif err.args[0] == pathutils.QUERY_SOCKET:
2340
      daemon = "the config daemon"
2341
    else:
2342
      daemon = "socket '%s'" % str(err.args[0])
2343
    obuf.write("Cannot communicate with %s.\nIs the process running"
2344
               " and listening for connections?" % daemon)
2345
  elif isinstance(err, luxi.TimeoutError):
2346
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2347
               " been submitted and will continue to run even if the call"
2348
               " timed out. Useful commands in this situation are \"gnt-job"
2349
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2350
    obuf.write(msg)
2351
  elif isinstance(err, luxi.PermissionError):
2352
    obuf.write("It seems you don't have permissions to connect to the"
2353
               " master daemon.\nPlease retry as a different user.")
2354
  elif isinstance(err, luxi.ProtocolError):
2355
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2356
               "%s" % msg)
2357
  elif isinstance(err, errors.JobLost):
2358
    obuf.write("Error checking job status: %s" % msg)
2359
  elif isinstance(err, errors.QueryFilterParseError):
2360
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2361
    obuf.write("\n".join(err.GetDetails()))
2362
  elif isinstance(err, errors.GenericError):
2363
    obuf.write("Unhandled Ganeti error: %s" % msg)
2364
  elif isinstance(err, JobSubmittedException):
2365
    obuf.write("JobID: %s\n" % err.args[0])
2366
    retcode = 0
2367
  else:
2368
    obuf.write("Unhandled exception: %s" % msg)
2369
  return retcode, obuf.getvalue().rstrip("\n")
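
# Illustrative sketch (not part of the original module): FormatError is meant
# for top-level exception handlers, mirroring what GenericMain does below:
#
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     retcode, err_msg = FormatError(err)
#     ToStderr(err_msg)
#     sys.exit(retcode)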
2370

    
2371

    
2372
def GenericMain(commands, override=None, aliases=None,
2373
                env_override=frozenset()):
2374
  """Generic main function for all the gnt-* commands.
2375

2376
  @param commands: a dictionary with a special structure, see the design doc
2377
                   for command line handling.
2378
  @param override: if not None, we expect a dictionary with keys that will
2379
                   override command line options; this can be used to pass
2380
                   options from the scripts to generic functions
2381
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2382
  @param env_override: list of environment names which are allowed to submit
2383
                       default args for commands
2384

2385
  """
2386
  # save the program name and the entire command line for later logging
2387
  if sys.argv:
2388
    binary = os.path.basename(sys.argv[0])
2389
    if not binary:
2390
      binary = sys.argv[0]
2391

    
2392
    if len(sys.argv) >= 2:
2393
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2394
    else:
2395
      logname = binary
2396

    
2397
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2398
  else:
2399
    binary = "<unknown program>"
2400
    cmdline = "<unknown>"
2401

    
2402
  if aliases is None:
2403
    aliases = {}
2404

    
2405
  try:
2406
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2407
                                       env_override)
2408
  except _ShowVersion:
2409
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2410
             constants.RELEASE_VERSION)
2411
    return constants.EXIT_SUCCESS
2412
  except _ShowUsage, err:
2413
    for line in _FormatUsage(binary, commands):
2414
      ToStdout(line)
2415

    
2416
    if err.exit_error:
2417
      return constants.EXIT_FAILURE
2418
    else:
2419
      return constants.EXIT_SUCCESS
2420
  except errors.ParameterError, err:
2421
    result, err_msg = FormatError(err)
2422
    ToStderr(err_msg)
2423
    return 1
2424

    
2425
  if func is None: # parse error
2426
    return 1
2427

    
2428
  if override is not None:
2429
    for key, val in override.iteritems():
2430
      setattr(options, key, val)
2431

    
2432
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2433
                     stderr_logging=True)
2434

    
2435
  logging.info("Command line: %s", cmdline)
2436

    
2437
  try:
2438
    result = func(options, args)
2439
  except (errors.GenericError, luxi.ProtocolError,
2440
          JobSubmittedException), err:
2441
    result, err_msg = FormatError(err)
2442
    logging.exception("Error during command processing")
2443
    ToStderr(err_msg)
2444
  except KeyboardInterrupt:
2445
    result = constants.EXIT_FAILURE
2446
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2447
             " might have been submitted and"
2448
             " will continue to run in the background.")
2449
  except IOError, err:
2450
    if err.errno == errno.EPIPE:
2451
      # our terminal went away, we'll exit
2452
      sys.exit(constants.EXIT_FAILURE)
2453
    else:
2454
      raise
2455

    
2456
  return result


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
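
# Illustrative sketch (not part of the original module): the option value is a
# list of (index, settings) pairs as produced by the --net option; indices
# without settings are filled with empty dictionaries (keys below are just an
# example of valid NIC parameters):
#
#   >>> ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
#   [{'link': 'br0'}, {}, {'mode': 'routed'}]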
2482

    
2483

    
2484
def GenericInstanceCreate(mode, opts, args):
2485
  """Add an instance to the cluster via either creation or import.
2486

2487
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2488
  @param opts: the command line options selected by the user
2489
  @type args: list
2490
  @param args: should contain only one element, the new instance name
2491
  @rtype: int
2492
  @return: the desired exit code
2493

2494
  """
2495
  instance = args[0]
2496

    
2497
  (pnode, snode) = SplitNodeOption(opts.node)
2498

    
2499
  hypervisor = None
2500
  hvparams = {}
2501
  if opts.hypervisor:
2502
    hypervisor, hvparams = opts.hypervisor
2503

    
2504
  if opts.nics:
2505
    nics = ParseNicOption(opts.nics)
2506
  elif opts.no_nics:
2507
    # no nics
2508
    nics = []
2509
  elif mode == constants.INSTANCE_CREATE:
2510
    # default of one nic, all auto
2511
    nics = [{}]
2512
  else:
2513
    # mode == import
2514
    nics = []
2515

    
2516
  if opts.disk_template == constants.DT_DISKLESS:
2517
    if opts.disks or opts.sd_size is not None:
2518
      raise errors.OpPrereqError("Diskless instance but disk"
2519
                                 " information passed", errors.ECODE_INVAL)
2520
    disks = []
2521
  else:
2522
    if (not opts.disks and not opts.sd_size
2523
        and mode == constants.INSTANCE_CREATE):
2524
      raise errors.OpPrereqError("No disk information specified",
2525
                                 errors.ECODE_INVAL)
2526
    if opts.disks and opts.sd_size is not None:
2527
      raise errors.OpPrereqError("Please use either the '--disk' or"
2528
                                 " '-s' option", errors.ECODE_INVAL)
2529
    if opts.sd_size is not None:
2530
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2531

    
2532
    if opts.disks:
2533
      try:
2534
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2535
      except ValueError, err:
2536
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2537
                                   errors.ECODE_INVAL)
2538
      disks = [{}] * disk_max
2539
    else:
2540
      disks = []
2541
    for didx, ddict in opts.disks:
2542
      didx = int(didx)
2543
      if not isinstance(ddict, dict):
2544
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2545
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2546
      elif constants.IDISK_SIZE in ddict:
2547
        if constants.IDISK_ADOPT in ddict:
2548
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2549
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2550
        try:
2551
          ddict[constants.IDISK_SIZE] = \
2552
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2553
        except ValueError, err:
2554
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2555
                                     (didx, err), errors.ECODE_INVAL)
2556
      elif constants.IDISK_ADOPT in ddict:
2557
        if mode == constants.INSTANCE_IMPORT:
2558
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2559
                                     " import", errors.ECODE_INVAL)
2560
        ddict[constants.IDISK_SIZE] = 0
2561
      else:
2562
        raise errors.OpPrereqError("Missing size or adoption source for"
2563
                                   " disk %d" % didx, errors.ECODE_INVAL)
2564
      disks[didx] = ddict
2565

    
2566
  if opts.tags is not None:
2567
    tags = opts.tags.split(",")
2568
  else:
2569
    tags = []
2570

    
2571
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2572
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2573

    
2574
  if mode == constants.INSTANCE_CREATE:
2575
    start = opts.start
2576
    os_type = opts.os
2577
    force_variant = opts.force_variant
2578
    src_node = None
2579
    src_path = None
2580
    no_install = opts.no_install
2581
    identify_defaults = False
2582
  elif mode == constants.INSTANCE_IMPORT:
2583
    start = False
2584
    os_type = None
2585
    force_variant = False
2586
    src_node = opts.src_node
2587
    src_path = opts.src_dir
2588
    no_install = None
2589
    identify_defaults = opts.identify_defaults
2590
  else:
2591
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2592

    
2593
  op = opcodes.OpInstanceCreate(instance_name=instance,
2594
                                disks=disks,
2595
                                disk_template=opts.disk_template,
2596
                                nics=nics,
2597
                                conflicts_check=opts.conflicts_check,
2598
                                pnode=pnode, snode=snode,
2599
                                ip_check=opts.ip_check,
2600
                                name_check=opts.name_check,
2601
                                wait_for_sync=opts.wait_for_sync,
2602
                                file_storage_dir=opts.file_storage_dir,
2603
                                file_driver=opts.file_driver,
2604
                                iallocator=opts.iallocator,
2605
                                hypervisor=hypervisor,
2606
                                hvparams=hvparams,
2607
                                beparams=opts.beparams,
2608
                                osparams=opts.osparams,
2609
                                mode=mode,
2610
                                start=start,
2611
                                os_type=os_type,
2612
                                force_variant=force_variant,
2613
                                src_node=src_node,
2614
                                src_path=src_path,
2615
                                tags=tags,
2616
                                no_install=no_install,
2617
                                identify_defaults=identify_defaults,
2618
                                ignore_ipolicy=opts.ignore_ipolicy)
2619

    
2620
  SubmitOrSend(op, opts)
2621
  return 0
2622

    
2623

    
2624
class _RunWhileClusterStoppedHelper:
2625
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2626

2627
  """
2628
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2629
    """Initializes this class.
2630

2631
    @type feedback_fn: callable
2632
    @param feedback_fn: Feedback function
2633
    @type cluster_name: string
2634
    @param cluster_name: Cluster name
2635
    @type master_node: string
2636
    @param master_node: Master node name
2637
    @type online_nodes: list
2638
    @param online_nodes: List of names of online nodes
2639

2640
    """
2641
    self.feedback_fn = feedback_fn
2642
    self.cluster_name = cluster_name
2643
    self.master_node = master_node
2644
    self.online_nodes = online_nodes
2645

    
2646
    self.ssh = ssh.SshRunner(self.cluster_name)
2647

    
2648
    self.nonmaster_nodes = [name for name in online_nodes
2649
                            if name != master_node]
2650

    
2651
    assert self.master_node not in self.nonmaster_nodes
2652

    
2653
  def _RunCmd(self, node_name, cmd):
2654
    """Runs a command on the local or a remote machine.
2655

2656
    @type node_name: string
2657
    @param node_name: Machine name
2658
    @type cmd: list
2659
    @param cmd: Command
2660

2661
    """
2662
    if node_name is None or node_name == self.master_node:
2663
      # No need to use SSH
2664
      result = utils.RunCmd(cmd)
2665
    else:
2666
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2667
                            utils.ShellQuoteArgs(cmd))
2668

    
2669
    if result.failed:
2670
      errmsg = ["Failed to run command %s" % result.cmd]
2671
      if node_name:
2672
        errmsg.append("on node %s" % node_name)
2673
      errmsg.append(": exitcode %s and error %s" %
2674
                    (result.exit_code, result.output))
2675
      raise errors.OpExecError(" ".join(errmsg))
2676

    
2677
  def Call(self, fn, *args):
2678
    """Call function while all daemons are stopped.
2679

2680
    @type fn: callable
2681
    @param fn: Function to be called
2682

2683
    """
2684
    # Pause watcher by acquiring an exclusive lock on watcher state file
2685
    self.feedback_fn("Blocking watcher")
2686
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2687
    try:
2688
      # TODO: Currently, this just blocks. There's no timeout.
2689
      # TODO: Should it be a shared lock?
2690
      watcher_block.Exclusive(blocking=True)
2691

    
2692
      # Stop master daemons, so that no new jobs can come in and all running
2693
      # ones are finished
2694
      self.feedback_fn("Stopping master daemons")
2695
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2696
      try:
2697
        # Stop daemons on all nodes
2698
        for node_name in self.online_nodes:
2699
          self.feedback_fn("Stopping daemons on %s" % node_name)
2700
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2701

    
2702
        # All daemons are shut down now
2703
        try:
2704
          return fn(self, *args)
2705
        except Exception, err:
2706
          _, errmsg = FormatError(err)
2707
          logging.exception("Caught exception")
2708
          self.feedback_fn(errmsg)
2709
          raise
2710
      finally:
2711
        # Start cluster again, master node last
2712
        for node_name in self.nonmaster_nodes + [self.master_node]:
2713
          self.feedback_fn("Starting daemons on %s" % node_name)
2714
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2715
    finally:
2716
      # Resume watcher
2717
      watcher_block.Close()
2718

    
2719

    
2720
def RunWhileClusterStopped(feedback_fn, fn, *args):
2721
  """Calls a function while all cluster daemons are stopped.
2722

2723
  @type feedback_fn: callable
2724
  @param feedback_fn: Feedback function
2725
  @type fn: callable
2726
  @param fn: Function to be called when daemons are stopped
2727

2728
  """
2729
  feedback_fn("Gathering cluster information")
2730

    
2731
  # This ensures we're running on the master daemon
2732
  cl = GetClient()
2733

    
2734
  (cluster_name, master_node) = \
2735
    cl.QueryConfigValues(["cluster_name", "master_node"])
2736

    
2737
  online_nodes = GetOnlineNodes([], cl=cl)
2738

    
2739
  # Don't keep a reference to the client. The master daemon will go away.
2740
  del cl
2741

    
2742
  assert master_node in online_nodes
2743

    
2744
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2745
                                       online_nodes).Call(fn, *args)


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Formats a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
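

# Example (illustrative sketch; the headers, fields and data values below are
# hypothetical):
#
#   headers = {"name": "Node", "dfree": "DFree"}
#   fields = ["name", "dfree"]
#   data = [["node1.example.com", 102400], ["node2.example.com", 51200]]
#   for line in GenerateTable(headers, fields, None, data,
#                             numfields=["dfree"], unitfields=["dfree"],
#                             units="h"):
#     ToStdout(line)
#
# With separator=None the "smart" algorithm pads every column to its widest
# value; passing e.g. separator="|" skips the padding and simply joins the
# fields with that string.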


def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
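

# Example (illustrative sketch; "args" and "opts" stand for the positional
# arguments and parsed options a gnt-* command function would receive, and
# the field names are only an example of valid node query fields):
#
#   return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"],
#                      args, opts.units, opts.separator, not opts.no_headers,
#                      verbose=opts.verbose)
#
# This is essentially what the various "gnt-* list" commands do: build a name
# filter, run the query over LUXI and print the formatted table.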


def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
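

# Example (illustrative sketch; the columns and rows below are hypothetical):
#
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Size", lambda value: utils.FormatUnit(value, "h"), True),
#     ]
#   rows = [["disk0", 1024], ["disk1", 20480]]
#   for line in FormatTable(rows, columns, True, None):
#     ToStdout(line)
#
# Each cell is passed through its column's formatting function; with
# separator=None the columns are padded, otherwise they are joined with the
# given separator string.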


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
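

# Examples (illustrative):
#
#   ParseTimespec("30")   => 30        (no suffix: seconds)
#   ParseTimespec("30m")  => 1800
#   ParseTimespec("2h")   => 7200
#   ParseTimespec("1w")   => 604800
#   ParseTimespec("xyz")  => raises errors.OpPrereqError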


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
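

# Example (illustrative sketch; the node names are hypothetical):
#
#   # All online node names, with a note about any offline ones being skipped
#   nodes = GetOnlineNodes([])
#
#   # Secondary IPs of the given nodes, excluding the master, e.g. for
#   # traffic over the replication network
#   sips = GetOnlineNodes(["node1.example.com", "node2.example.com"],
#                         secondary_ips=True, filter_master=True)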


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
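

# Example (illustrative sketch; "opts" and the instance names are
# hypothetical):
#
#   jex = JobExecutor(opts=opts)
#   for instance_name in ["inst1.example.com", "inst2.example.com"]:
#     op = opcodes.OpInstanceStartup(instance_name=instance_name)
#     jex.QueueJob(instance_name, op)
#   results = jex.GetResults()
#
# GetResults() submits any still-queued jobs, waits for all of them and
# returns (success, result) pairs in submission order.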


def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the object's own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
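

# Example (illustrative): with own parameters
#   {"kernel_path": "/boot/vmlinuz"}
# and an effective parameter set
#   {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/sda"}
# the result is
#   {"kernel_path": "/boot/vmlinuz", "root_path": "default (/dev/sda)"}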


def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  ret = [
    (key,
     FormatParamsDictInfo(custom_minmax.get(key, {}),
                          eff_ipolicy[constants.ISPECS_MINMAX][key]))
    for key in constants.ISPECS_MINMAX_KEYS
    ]
  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("enabled disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
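

# Example (illustrative sketch; the instance names are hypothetical):
#
#   if not ConfirmOperation(["inst1.example.com", "inst2.example.com"],
#                           "instances", "shutdown"):
#     return constants.EXIT_FAILURE
#
# With more than 20 names the list is not shown immediately; the user may
# press "v" to view it before answering.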


def _MaybeParseUnit(elements):
  """Parses and returns a dictionary of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitIspecsFromOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                        ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
                        group_ipolicy, allowed_values):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  for key in constants.ISPECS_MINMAX_KEYS:
    ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
  ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  ipolicy_out = objects.MakeEmptyIPolicy()
  _InitIspecsFromOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                      ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
                      group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
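

# Example (illustrative; the keys and values are hypothetical). The call
#
#   PrintGenericInfo([
#     ("name", "instance1.example.com"),
#     ("disks", ["disk/0", "disk/1"]),
#     ("nics", [[("mac", "aa:bb:cc:dd:ee:ff")]]),
#     ])
#
# prints something like:
#
#   name: instance1.example.com
#   disks:
#     - disk/0
#     - disk/1
#   nics:
#     - mac: aa:bb:cc:dd:ee:ff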