#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_STORAGE_TYPES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "FormatParamsDictInfo",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Argument with suggested values.

  The values passed to the constructor are offered as suggestions (e.g. for
  completion), but any value is accepted.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Like L{ArgSuggest}, but the value must be one of the choices passed to the
  constructor.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
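

# Editor's note: the following constant is an illustrative sketch, not part of
# the original module.  It shows how the argument classes above describe a
# command's positional arguments: each slot is an _Argument subclass, and
# min/max bound how many values it may consume (max=None meaning unlimited).
# The hypothetical spec below would accept exactly one instance name followed
# by any number of node names.
_EXAMPLE_ARGS_SPEC = [
  ArgInstance(min=1, max=1),   # exactly one instance name
  ArgNode(min=0, max=None),    # zero or more node names
  ]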


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
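

# Editor's note: an illustrative sketch, not part of the original module.  It
# shows the (kind, name) pair produced by _ExtractTagsObject and how the name
# is consumed from args; _FakeOpts stands in for the parsed options object.
def _ExampleExtractTagsObject():
  """Sketch only: cluster tags need no name, other kinds pop it from args.

  """
  class _FakeOpts:
    tag_type = constants.TAG_NODE

  args = ["node1.example.com", "extra-arg"]
  kind, name = _ExtractTagsObject(_FakeOpts(), args)
  # kind == constants.TAG_NODE, name == "node1.example.com", and the name has
  # been consumed from the argument list: args == ["extra-arg"]
  return kind, name, args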


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all tag
  object kinds (cluster, node group, node, network, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all tag
  object kinds (cluster, node group, node, network, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all tag
  object kinds (cluster, node group, node, network, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """optparse custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Elements
  without an explicit value are converted specially: keys with the 'no_'
  prefix have the prefix stripped and the value set to False, keys with the
  '-' prefix have the prefix stripped and the value set to None, and all
  other keys get the value True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key: val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
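

# Editor's note: an illustrative sketch, not part of the original module,
# showing the three value forms handled by _SplitKeyVal; the parameter names
# used here are only examples.
def _ExampleSplitKeyVal():
  """Sketch only: explicit values, 'no_' prefixes and '-' prefixes.

  """
  parsed = _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus")
  # 'memory=512'      -> explicit value:   parsed["memory"] == "512"
  # 'no_auto_balance' -> prefix stripped:  parsed["auto_balance"] is False
  # '-vcpus'          -> prefix stripped:  parsed["vcpus"] is None
  return parsed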


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
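

# Editor's note: an illustrative sketch, not part of the original module,
# showing the (ident, value) tuples produced by check_ident_key_val; the
# option names and parameter values below are only examples, and the first
# argument is unused except for error messages.
def _ExampleCheckIdentKeyVal():
  """Sketch only: normal and 'no_' forms of ident:key=val options.

  """
  # "drbd:metavg=xenvg,resync-rate=100"
  #   -> ("drbd", {"metavg": "xenvg", "resync-rate": "100"})
  normal = check_ident_key_val(None, "--disk-parameters",
                               "drbd:metavg=xenvg,resync-rate=100")
  # "no_link0" -> ("link0", False), i.e. remove the parameter group
  removed = check_ident_key_val(None, "--net", "no_link0")
  return normal, removed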


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
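

# Editor's note: an illustrative sketch, not part of the original module, of
# the simpler converters above; the option names are only examples, and the
# list example assumes UnescapeAndSplit splits on "," by default.
def _ExampleSimpleConverters():
  """Sketch only: "bool", "list" and "maybefloat" conversions.

  """
  assert check_bool(None, "--offline", "yes") is True
  assert check_bool(None, "--offline", "no") is False
  assert check_list(None, "--reserved-lvs", "") == []
  assert check_list(None, "--reserved-lvs", "xenvg/root,xenvg/swap") == \
    ["xenvg/root", "xenvg/swap"]
  # "default" is passed through unchanged, anything else must parse as float
  assert check_maybefloat(None, "--ipolicy-vcpu-ratio", "default") == "default"
  assert check_maybefloat(None, "--ipolicy-vcpu-ratio", "4") == 4.0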


# completion_suggest is normally a list. Numeric values that do not evaluate
# to False are used to request dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
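

# Editor's note: an illustrative sketch, not part of the original module, of
# how the custom option class is meant to be used: options are built with
# cli_option() and handed to an optparse parser, which then understands the
# extra types registered above ("keyval", "bool", ...).  The option names and
# destinations below are only examples.
def _ExampleBuildParser():
  """Sketch only: a throw-away parser using two of the custom types.

  """
  parser = OptionParser(option_class=CliOption, usage="%prog [options]")
  parser.add_option(cli_option("-B", "--backend-parameters", dest="beparams",
                               type="keyval", default={},
                               help="Backend parameters"))
  parser.add_option(cli_option("--offline", dest="offline", type="bool",
                               default=None, help="Set the offline flag"))
  options, args = parser.parse_args(["-B", "memory=512,no_auto_balance",
                                     "--offline", "yes"])
  # options.beparams == {"memory": "512", "auto_balance": False}
  # options.offline is True
  return options, args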


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary IP for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Re-add old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for LVM-based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_STORAGE_TYPES_OPT = cli_option("--enabled-storage-types",
                                       dest="enabled_storage_types",
                                       help="Comma-separated list of "
                                            "storage methods",
                                       type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the MAC prefix for the instance MAC"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)
1200

    
1201
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1202
                               help="Specify the node interface (cluster-wide)"
1203
                               " on which the master IP address will be added"
1204
                               " (cluster init default: %s)" %
1205
                               constants.DEFAULT_BRIDGE,
1206
                               metavar="NETDEV",
1207
                               default=None)
1208

    
1209
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1210
                                help="Specify the netmask of the master IP",
1211
                                metavar="NETMASK",
1212
                                default=None)
1213

    
1214
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1215
                                     dest="use_external_mip_script",
1216
                                     help="Specify whether to run a"
1217
                                     " user-provided script for the master"
1218
                                     " IP address turnup and"
1219
                                     " turndown operations",
1220
                                     type="bool", metavar=_YORNO, default=None)
1221

    
1222
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1223
                                help="Specify the default directory (cluster-"
1224
                                "wide) for storing the file-based disks [%s]" %
1225
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1226
                                metavar="DIR",
1227
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1228

    
1229
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1230
  "--shared-file-storage-dir",
1231
  dest="shared_file_storage_dir",
1232
  help="Specify the default directory (cluster-wide) for storing the"
1233
  " shared file-based disks [%s]" %
1234
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1235
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1236

    
1237
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1238
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1239
                                   action="store_false", default=True)
1240

    
1241
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1242
                                    help="Don't initialize SSH keys",
1243
                                    action="store_false", default=True)
1244

    
1245
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1246
                             help="Enable parseable error messages",
1247
                             action="store_true", default=False)
1248

    
1249
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1250
                          help="Skip N+1 memory redundancy tests",
1251
                          action="store_true", default=False)
1252

    
1253
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1254
                             help="Type of reboot: soft/hard/full",
1255
                             default=constants.INSTANCE_REBOOT_HARD,
1256
                             metavar="<REBOOT>",
1257
                             choices=list(constants.REBOOT_TYPES))
1258

    
1259
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1260
                                    dest="ignore_secondaries",
1261
                                    default=False, action="store_true",
1262
                                    help="Ignore errors from secondaries")
1263

    
1264
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1265
                            action="store_false", default=True,
1266
                            help="Don't shutdown the instance (unsafe)")
1267

    
1268
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1269
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1270
                         help="Maximum time to wait")
1271

    
1272
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1273
                                  dest="shutdown_timeout", type="int",
1274
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1275
                                  help="Maximum time to wait for instance"
1276
                                  " shutdown")
1277

    
1278
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1279
                          default=None,
1280
                          help=("Number of seconds between repetions of the"
1281
                                " command"))
1282

    
1283
EARLY_RELEASE_OPT = cli_option("--early-release",
1284
                               dest="early_release", default=False,
1285
                               action="store_true",
1286
                               help="Release the locks on the secondary"
1287
                               " node(s) early")
1288

    
1289
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1290
                                  dest="new_cluster_cert",
1291
                                  default=False, action="store_true",
1292
                                  help="Generate a new cluster certificate")
1293

    
1294
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1295
                           default=None,
1296
                           help="File containing new RAPI certificate")
1297

    
1298
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1299
                               default=None, action="store_true",
1300
                               help=("Generate a new self-signed RAPI"
1301
                                     " certificate"))
1302

    
1303
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1304
                            default=None,
1305
                            help="File containing new SPICE certificate")
1306

    
1307
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1308
                              default=None,
1309
                              help="File containing the certificate of the CA"
1310
                              " which signed the SPICE certificate")
1311

    
1312
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1313
                                dest="new_spice_cert", default=None,
1314
                                action="store_true",
1315
                                help=("Generate a new self-signed SPICE"
1316
                                      " certificate"))
1317

    
1318
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1319
                                    dest="new_confd_hmac_key",
1320
                                    default=False, action="store_true",
1321
                                    help=("Create a new HMAC key for %s" %
1322
                                          constants.CONFD))
1323

    
1324
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1325
                                       dest="cluster_domain_secret",
1326
                                       default=None,
1327
                                       help=("Load new new cluster domain"
1328
                                             " secret from file"))
1329

    
1330
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing a VM-state-changing"
                             " operation")


def _PriorityOptionCb(option, _, value, parser):
1414
  """Callback for processing C{--priority} option.
1415

1416
  """
1417
  value = _PRIONAME_TO_VALUE[value]
1418

    
1419
  setattr(parser.values, option.dest, value)
1420

    
1421

    
1422
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1423
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1424
                          choices=_PRIONAME_TO_VALUE.keys(),
1425
                          action="callback", type="choice",
1426
                          callback=_PriorityOptionCb,
1427
                          help="Priority for opcode processing")
1428

    
1429
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1430
                        type="bool", default=None, metavar=_YORNO,
1431
                        help="Sets the hidden flag on the OS")
1432

    
1433
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1434
                        type="bool", default=None, metavar=_YORNO,
1435
                        help="Sets the blacklisted flag on the OS")
1436

    
1437
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1438
                                     type="bool", metavar=_YORNO,
1439
                                     dest="prealloc_wipe_disks",
1440
                                     help=("Wipe disks prior to instance"
1441
                                           " creation"))
1442

    
1443
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1444
                             type="keyval", default=None,
1445
                             help="Node parameters")
1446

    
1447
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1448
                              action="store", metavar="POLICY", default=None,
1449
                              help="Allocation policy for the node group")
1450

    
1451
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1452
                              type="bool", metavar=_YORNO,
1453
                              dest="node_powered",
1454
                              help="Specify if the SoR for node is powered")
1455

    
1456
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1457
                             default=constants.OOB_TIMEOUT,
1458
                             help="Maximum time to wait for out-of-band helper")
1459

    
1460
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1461
                             default=constants.OOB_POWER_DELAY,
1462
                             help="Time in seconds to wait between power-ons")
1463

    
1464
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1465
                              action="store_true", default=False,
1466
                              help=("Whether command argument should be treated"
1467
                                    " as filter"))
1468

    
1469
NO_REMEMBER_OPT = cli_option("--no-remember",
1470
                             dest="no_remember",
1471
                             action="store_true", default=False,
1472
                             help="Perform but do not record the change"
1473
                             " in the configuration")
1474

    
1475
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1476
                              default=False, action="store_true",
1477
                              help="Evacuate primary instances only")
1478

    
1479
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1480
                                default=False, action="store_true",
1481
                                help="Evacuate secondary instances only"
1482
                                     " (applies only to internally mirrored"
1483
                                     " disk templates, e.g. %s)" %
1484
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1485

    
1486
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1487
                                action="store_true", default=False,
1488
                                help="Pause instance at startup")
1489

    
1490
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1491
                          help="Destination node group (name or uuid)",
1492
                          default=None, action="append",
1493
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1494

    
1495
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1496
                               action="append", dest="ignore_errors",
1497
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1498
                               help="Error code to be ignored")
1499

    
1500
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1501
                            action="append",
1502
                            help=("Specify disk state information in the"
1503
                                  " format"
1504
                                  " storage_type/identifier:option=value,...;"
1505
                                  " note this is unused for now"),
1506
                            type="identkeyval")
1507

    
1508
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1509
                          action="append",
1510
                          help=("Specify hypervisor state information in the"
1511
                                " format hypervisor:option=value,...;"
1512
                                " note this is unused for now"),
1513
                          type="identkeyval")
1514

    
1515
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1516
                                action="store_true", default=False,
1517
                                help="Ignore instance policy violations")
1518

    
1519
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1520
                             help="Sets the instance's runtime memory,"
1521
                             " ballooning it up or down to the new value",
1522
                             default=None, type="unit", metavar="<size>")
1523

    
1524
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1525
                          action="store_true", default=False,
1526
                          help="Marks the grow as absolute instead of the"
1527
                          " (default) relative mode")
1528

    
1529
NETWORK_OPT = cli_option("--network",
1530
                         action="store", default=None, dest="network",
1531
                         help="IP network in CIDR notation")
1532

    
1533
GATEWAY_OPT = cli_option("--gateway",
1534
                         action="store", default=None, dest="gateway",
1535
                         help="IP address of the router (gateway)")
1536

    
1537
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1538
                                  action="store", default=None,
1539
                                  dest="add_reserved_ips",
1540
                                  help="Comma-separated list of"
1541
                                  " reserved IPs to add")
1542

    
1543
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1544
                                     action="store", default=None,
1545
                                     dest="remove_reserved_ips",
1546
                                     help="Comma-delimited list of"
1547
                                     " reserved IPs to remove")
1548

    
1549
NETWORK6_OPT = cli_option("--network6",
1550
                          action="store", default=None, dest="network6",
1551
                          help="IP network in CIDR notation")
1552

    
1553
GATEWAY6_OPT = cli_option("--gateway6",
1554
                          action="store", default=None, dest="gateway6",
1555
                          help="IP6 address of the router (gateway)")
1556

    
1557
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1558
                                  dest="conflicts_check",
1559
                                  default=True,
1560
                                  action="store_false",
1561
                                  help="Don't check for conflicting IPs")
1562

    
1563
#: Options provided by all commands
1564
COMMON_OPTS = [DEBUG_OPT]
1565

    
1566
# common options for creating instances. add and import then add their own
1567
# specific ones.
1568
COMMON_CREATE_OPTS = [
1569
  BACKEND_OPT,
1570
  DISK_OPT,
1571
  DISK_TEMPLATE_OPT,
1572
  FILESTORE_DIR_OPT,
1573
  FILESTORE_DRIVER_OPT,
1574
  HYPERVISOR_OPT,
1575
  IALLOCATOR_OPT,
1576
  NET_OPT,
1577
  NODE_PLACEMENT_OPT,
1578
  NOIPCHECK_OPT,
1579
  NOCONFLICTSCHECK_OPT,
1580
  NONAMECHECK_OPT,
1581
  NONICS_OPT,
1582
  NWSYNC_OPT,
1583
  OSPARAMS_OPT,
1584
  OS_SIZE_OPT,
1585
  SUBMIT_OPT,
1586
  TAG_ADD_OPT,
1587
  DRY_RUN_OPT,
1588
  PRIORITY_OPT,
1589
  ]
1590

    
1591
# common instance policy options
1592
INSTANCE_POLICY_OPTS = [
1593
  SPECS_CPU_COUNT_OPT,
1594
  SPECS_DISK_COUNT_OPT,
1595
  SPECS_DISK_SIZE_OPT,
1596
  SPECS_MEM_SIZE_OPT,
1597
  SPECS_NIC_COUNT_OPT,
1598
  IPOLICY_DISK_TEMPLATES,
1599
  IPOLICY_VCPU_RATIO,
1600
  IPOLICY_SPINDLE_RATIO,
1601
  ]
1602

    
1603

    
1604
class _ShowUsage(Exception):
1605
  """Exception class for L{_ParseArgs}.
1606

1607
  """
1608
  def __init__(self, exit_error):
1609
    """Initializes instances of this class.
1610

1611
    @type exit_error: bool
1612
    @param exit_error: Whether to report failure on exit
1613

1614
    """
1615
    Exception.__init__(self)
1616
    self.exit_error = exit_error
1617

    
1618

    
1619
class _ShowVersion(Exception):
1620
  """Exception class for L{_ParseArgs}.
1621

1622
  """
1623

    
1624

    
1625
def _ParseArgs(binary, argv, commands, aliases, env_override):
1626
  """Parser for the command line arguments.
1627

1628
  This function parses the arguments and returns the function which
1629
  must be executed together with its (modified) arguments.
1630

1631
  @param binary: Script name
1632
  @param argv: Command line arguments
1633
  @param commands: Dictionary containing command definitions
1634
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1635
  @param env_override: list of env variables allowed for default args
1636
  @raise _ShowUsage: If usage description should be shown
1637
  @raise _ShowVersion: If version should be shown
1638

1639
  """
1640
  assert not (env_override - set(commands))
1641
  assert not (set(aliases.keys()) & set(commands.keys()))
1642

    
1643
  if len(argv) > 1:
1644
    cmd = argv[1]
1645
  else:
1646
    # No option or command given
1647
    raise _ShowUsage(exit_error=True)
1648

    
1649
  if cmd == "--version":
1650
    raise _ShowVersion()
1651
  elif cmd == "--help":
1652
    raise _ShowUsage(exit_error=False)
1653
  elif not (cmd in commands or cmd in aliases):
1654
    raise _ShowUsage(exit_error=True)
1655

    
1656
  # get command, unalias it, and look it up in commands
1657
  if cmd in aliases:
1658
    if aliases[cmd] not in commands:
1659
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1660
                                   " command '%s'" % (cmd, aliases[cmd]))
1661

    
1662
    cmd = aliases[cmd]
1663

    
1664
  if cmd in env_override:
1665
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1666
    env_args = os.environ.get(args_env_name)
1667
    if env_args:
1668
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1669

    
1670
  func, args_def, parser_opts, usage, description = commands[cmd]
1671
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1672
                        description=description,
1673
                        formatter=TitledHelpFormatter(),
1674
                        usage="%%prog %s %s" % (cmd, usage))
1675
  parser.disable_interspersed_args()
1676
  options, args = parser.parse_args(args=argv[2:])
1677

    
1678
  if not _CheckArguments(cmd, args_def, args):
1679
    return None, None, None
1680

    
1681
  return func, options, args
1682

    
1683

    
1684
def _FormatUsage(binary, commands):
1685
  """Generates a nice description of all commands.
1686

1687
  @param binary: Script name
1688
  @param commands: Dictionary containing command definitions
1689

1690
  """
1691
  # compute the max line length for cmd + usage
1692
  mlen = min(60, max(map(len, commands)))
1693

    
1694
  yield "Usage: %s {command} [options...] [argument...]" % binary
1695
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1696
  yield ""
1697
  yield "Commands:"
1698

    
1699
  # and format a nice command list
1700
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1701
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1702
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1703
    for line in help_lines:
1704
      yield " %-*s   %s" % (mlen, "", line)
1705

    
1706
  yield ""
1707

    
1708

    
1709
def _CheckArguments(cmd, args_def, args):
1710
  """Verifies the arguments using the argument definition.
1711

1712
  Algorithm:
1713

1714
    1. Abort with error if values specified by user but none expected.
1715

1716
    1. For each argument in definition
1717

1718
      1. Keep running count of minimum number of values (min_count)
1719
      1. Keep running count of maximum number of values (max_count)
1720
      1. If it has an unlimited number of values
1721

1722
        1. Abort with error if it's not the last argument in the definition
1723

1724
    1. If last argument has limited number of values
1725

1726
      1. Abort with error if number of values doesn't match or is too large
1727

1728
    1. Abort with error if user didn't pass enough values (min_count)
1729

1730
  """
1731
  if args and not args_def:
1732
    ToStderr("Error: Command %s expects no arguments", cmd)
1733
    return False
1734

    
1735
  min_count = None
1736
  max_count = None
1737
  check_max = None
1738

    
1739
  last_idx = len(args_def) - 1
1740

    
1741
  for idx, arg in enumerate(args_def):
1742
    if min_count is None:
1743
      min_count = arg.min
1744
    elif arg.min is not None:
1745
      min_count += arg.min
1746

    
1747
    if max_count is None:
1748
      max_count = arg.max
1749
    elif arg.max is not None:
1750
      max_count += arg.max
1751

    
1752
    if idx == last_idx:
1753
      check_max = (arg.max is not None)
1754

    
1755
    elif arg.max is None:
1756
      raise errors.ProgrammerError("Only the last argument can have max=None")
1757

    
1758
  if check_max:
1759
    # Command with exact number of arguments
1760
    if (min_count is not None and max_count is not None and
1761
        min_count == max_count and len(args) != min_count):
1762
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1763
      return False
1764

    
1765
    # Command with limited number of arguments
1766
    if max_count is not None and len(args) > max_count:
1767
      ToStderr("Error: Command %s expects only %d argument(s)",
1768
               cmd, max_count)
1769
      return False
1770

    
1771
  # Command with some required arguments
1772
  if min_count is not None and len(args) < min_count:
1773
    ToStderr("Error: Command %s expects at least %d argument(s)",
1774
             cmd, min_count)
1775
    return False
1776

    
1777
  return True
1778

    
1779

    
1780
def SplitNodeOption(value):
  """Splits the value of a --node option.

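  Example (illustrative node names):

    SplitNodeOption("node1.example.com:node2.example.com")
      -> ["node1.example.com", "node2.example.com"]
    SplitNodeOption("node1.example.com")
      -> ("node1.example.com", None)
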
  """
1784
  if value and ":" in value:
1785
    return value.split(":", 1)
1786
  else:
1787
    return (value, None)
1788

    
1789

    
1790
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

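  Example (illustrative OS name and variants):

    CalculateOSNames("debootstrap", ["wheezy", "jessie"])
      -> ["debootstrap+wheezy", "debootstrap+jessie"]
    CalculateOSNames("debootstrap", None)
      -> ["debootstrap"]
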
  """
1801
  if os_variants:
1802
    return ["%s+%s" % (os_name, v) for v in os_variants]
1803
  else:
1804
    return [os_name]
1805

    
1806

    
1807
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

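  Example (illustrative field names):

    ParseFields(None, ["name", "os"]) -> ["name", "os"]
    ParseFields("+beparams", ["name", "os"]) -> ["name", "os", "beparams"]
    ParseFields("name,admin_state", ["name", "os"]) -> ["name", "admin_state"]
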
  """
1816
  if selected is None:
1817
    return default
1818

    
1819
  if selected.startswith("+"):
1820
    return default + selected[1:].split(",")
1821

    
1822
  return selected.split(",")
1823

    
1824

    
1825
UsesRPC = rpc.RunWithRPC
1826

    
1827

    
1828
def AskUser(text, choices=None):
1829
  """Ask the user a question.
1830

1831
  @param text: the question to ask
1832

1833
  @param choices: list of tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

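  Illustrative use (the prompt text is just an example):

    if not AskUser("Continue with the operation?"):
      return constants.EXIT_FAILURE
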
  """
1843
  if choices is None:
1844
    choices = [("y", True, "Perform the operation"),
1845
               ("n", False, "Do not perform the operation")]
1846
  if not choices or not isinstance(choices, list):
1847
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1848
  for entry in choices:
1849
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1850
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1851

    
1852
  answer = choices[-1][1]
1853
  new_text = []
1854
  for line in text.splitlines():
1855
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1856
  text = "\n".join(new_text)
1857
  try:
1858
    f = file("/dev/tty", "a+")
1859
  except IOError:
1860
    return answer
1861
  try:
1862
    chars = [entry[0] for entry in choices]
1863
    chars[-1] = "[%s]" % chars[-1]
1864
    chars.append("?")
1865
    maps = dict([(entry[0], entry[1]) for entry in choices])
1866
    while True:
1867
      f.write(text)
1868
      f.write("\n")
1869
      f.write("/".join(chars))
1870
      f.write(": ")
1871
      line = f.readline(2).strip().lower()
1872
      if line in maps:
1873
        answer = maps[line]
1874
        break
1875
      elif line == "?":
1876
        for entry in choices:
1877
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1878
        f.write("\n")
1879
        continue
1880
  finally:
1881
    f.close()
1882
  return answer
1883

    
1884

    
1885
class JobSubmittedException(Exception):
1886
  """Job was submitted, client should exit.
1887

1888
  This exception has one argument, the ID of the job that was
1889
  submitted. The handler should print this ID.
1890

1891
  This is not an error, just a structured way to exit from clients.
1892

1893
  """
1894

    
1895

    
1896
def SendJob(ops, cl=None):
1897
  """Function to submit an opcode without waiting for the results.
1898

1899
  @type ops: list
1900
  @param ops: list of opcodes
1901
  @type cl: luxi.Client
1902
  @param cl: the luxi client to use for communicating with the master;
1903
             if None, a new client will be created
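
  Example (illustrative; C{op} is any opcode built by the caller):

    job_id = SendJob([op], cl=cl)
    ToStdout("%s", job_id)
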
  """
1906
  if cl is None:
1907
    cl = GetClient()
1908

    
1909
  job_id = cl.SubmitJob(ops)
1910

    
1911
  return job_id
1912

    
1913

    
1914
def GenericPollJob(job_id, cbs, report_cbs):
1915
  """Generic job-polling function.
1916

1917
  @type job_id: number
1918
  @param job_id: Job ID
1919
  @type cbs: Instance of L{JobPollCbBase}
1920
  @param cbs: Data callbacks
1921
  @type report_cbs: Instance of L{JobPollReportCbBase}
1922
  @param report_cbs: Reporting callbacks
1923

1924
  """
1925
  prev_job_info = None
1926
  prev_logmsg_serial = None
1927

    
1928
  status = None
1929

    
1930
  while True:
1931
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1932
                                      prev_logmsg_serial)
1933
    if not result:
1934
      # job not found, go away!
1935
      raise errors.JobLost("Job with id %s lost" % job_id)
1936

    
1937
    if result == constants.JOB_NOTCHANGED:
1938
      report_cbs.ReportNotChanged(job_id, status)
1939

    
1940
      # Wait again
1941
      continue
1942

    
1943
    # Split result, a tuple of (field values, log entries)
1944
    (job_info, log_entries) = result
1945
    (status, ) = job_info
1946

    
1947
    if log_entries:
1948
      for log_entry in log_entries:
1949
        (serial, timestamp, log_type, message) = log_entry
1950
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1951
                                    log_type, message)
1952
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1953

    
1954
    # TODO: Handle canceled and archived jobs
1955
    elif status in (constants.JOB_STATUS_SUCCESS,
1956
                    constants.JOB_STATUS_ERROR,
1957
                    constants.JOB_STATUS_CANCELING,
1958
                    constants.JOB_STATUS_CANCELED):
1959
      break
1960

    
1961
    prev_job_info = job_info
1962

    
1963
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1964
  if not jobs:
1965
    raise errors.JobLost("Job with id %s lost" % job_id)
1966

    
1967
  status, opstatus, result = jobs[0]
1968

    
1969
  if status == constants.JOB_STATUS_SUCCESS:
1970
    return result
1971

    
1972
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1973
    raise errors.OpExecError("Job was canceled")
1974

    
1975
  has_ok = False
1976
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1977
    if status == constants.OP_STATUS_SUCCESS:
1978
      has_ok = True
1979
    elif status == constants.OP_STATUS_ERROR:
1980
      errors.MaybeRaise(msg)
1981

    
1982
      if has_ok:
1983
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1984
                                 (idx, msg))
1985

    
1986
      raise errors.OpExecError(str(msg))
1987

    
1988
  # default failure mode
1989
  raise errors.OpExecError(result)
1990

    
1991

    
1992
class JobPollCbBase:
1993
  """Base class for L{GenericPollJob} callbacks.
1994

1995
  """
1996
  def __init__(self):
1997
    """Initializes this class.
1998

1999
    """
2000

    
2001
  def WaitForJobChangeOnce(self, job_id, fields,
2002
                           prev_job_info, prev_log_serial):
2003
    """Waits for changes on a job.
2004

2005
    """
2006
    raise NotImplementedError()
2007

    
2008
  def QueryJobs(self, job_ids, fields):
2009
    """Returns the selected fields for the selected job IDs.
2010

2011
    @type job_ids: list of numbers
2012
    @param job_ids: Job IDs
2013
    @type fields: list of strings
2014
    @param fields: Fields
2015

2016
    """
2017
    raise NotImplementedError()
2018

    
2019

    
2020
class JobPollReportCbBase:
2021
  """Base class for L{GenericPollJob} reporting callbacks.
2022

2023
  """
2024
  def __init__(self):
2025
    """Initializes this class.
2026

2027
    """
2028

    
2029
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2030
    """Handles a log message.
2031

2032
    """
2033
    raise NotImplementedError()
2034

    
2035
  def ReportNotChanged(self, job_id, status):
2036
    """Called for if a job hasn't changed in a while.
2037

2038
    @type job_id: number
2039
    @param job_id: Job ID
2040
    @type status: string or None
2041
    @param status: Job status if available
2042

2043
    """
2044
    raise NotImplementedError()
2045

    
2046

    
2047
class _LuxiJobPollCb(JobPollCbBase):
2048
  def __init__(self, cl):
2049
    """Initializes this class.
2050

2051
    """
2052
    JobPollCbBase.__init__(self)
2053
    self.cl = cl
2054

    
2055
  def WaitForJobChangeOnce(self, job_id, fields,
2056
                           prev_job_info, prev_log_serial):
2057
    """Waits for changes on a job.
2058

2059
    """
2060
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2061
                                        prev_job_info, prev_log_serial)
2062

    
2063
  def QueryJobs(self, job_ids, fields):
2064
    """Returns the selected fields for the selected job IDs.
2065

2066
    """
2067
    return self.cl.QueryJobs(job_ids, fields)
2068

    
2069

    
2070
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2071
  def __init__(self, feedback_fn):
2072
    """Initializes this class.
2073

2074
    """
2075
    JobPollReportCbBase.__init__(self)
2076

    
2077
    self.feedback_fn = feedback_fn
2078

    
2079
    assert callable(feedback_fn)
2080

    
2081
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2082
    """Handles a log message.
2083

2084
    """
2085
    self.feedback_fn((timestamp, log_type, log_msg))
2086

    
2087
  def ReportNotChanged(self, job_id, status):
2088
    """Called if a job hasn't changed in a while.
2089

2090
    """
2091
    # Ignore
2092

    
2093

    
2094
class StdioJobPollReportCb(JobPollReportCbBase):
2095
  def __init__(self):
2096
    """Initializes this class.
2097

2098
    """
2099
    JobPollReportCbBase.__init__(self)
2100

    
2101
    self.notified_queued = False
2102
    self.notified_waitlock = False
2103

    
2104
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2105
    """Handles a log message.
2106

2107
    """
2108
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2109
             FormatLogMessage(log_type, log_msg))
2110

    
2111
  def ReportNotChanged(self, job_id, status):
2112
    """Called if a job hasn't changed in a while.
2113

2114
    """
2115
    if status is None:
2116
      return
2117

    
2118
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2119
      ToStderr("Job %s is waiting in queue", job_id)
2120
      self.notified_queued = True
2121

    
2122
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2123
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2124
      self.notified_waitlock = True
2125

    
2126

    
2127
def FormatLogMessage(log_type, log_msg):
2128
  """Formats a job message according to its type.
2129

2130
  """
2131
  if log_type != constants.ELOG_MESSAGE:
2132
    log_msg = str(log_msg)
2133

    
2134
  return utils.SafeEncode(log_msg)
2135

    
2136

    
2137
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2138
  """Function to poll for the result of a job.
2139

2140
  @type job_id: job identifier
2141
  @param job_id: the job to poll for results
2142
  @type cl: luxi.Client
2143
  @param cl: the luxi client to use for communicating with the master;
2144
             if None, a new client will be created
2145

2146
  """
2147
  if cl is None:
2148
    cl = GetClient()
2149

    
2150
  if reporter is None:
2151
    if feedback_fn:
2152
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2153
    else:
2154
      reporter = StdioJobPollReportCb()
2155
  elif feedback_fn:
2156
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2157

    
2158
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2159

    
2160

    
2161
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2162
  """Legacy function to submit an opcode.
2163

2164
  This is just a simple wrapper over the construction of the processor
2165
  instance. It should be extended to better handle feedback and
2166
  interaction functions.
2167

2168
  """
2169
  if cl is None:
2170
    cl = GetClient()
2171

    
2172
  SetGenericOpcodeOpts([op], opts)
2173

    
2174
  job_id = SendJob([op], cl=cl)
2175

    
2176
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2177
                       reporter=reporter)
2178

    
2179
  return op_results[0]
2180

    
2181

    
2182
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2183
  """Wrapper around SubmitOpCode or SendJob.
2184

2185
  This function will decide, based on the 'opts' parameter, whether to
2186
  submit and wait for the result of the opcode (and return it), or
2187
  whether to just send the job and print its identifier. It is used in
2188
  order to simplify the implementation of the '--submit' option.
2189

2190
  It will also process the opcodes if we're sending them via SendJob
2191
  (otherwise SubmitOpCode does it).
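
  Illustrative use from a command implementation (the opcode shown is only an
  example):

    op = opcodes.OpInstanceStartup(instance_name=args[0])
    SubmitOrSend(op, opts)
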
  """
2194
  if opts and opts.submit_only:
2195
    job = [op]
2196
    SetGenericOpcodeOpts(job, opts)
2197
    job_id = SendJob(job, cl=cl)
2198
    raise JobSubmittedException(job_id)
2199
  else:
2200
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2201

    
2202

    
2203
def SetGenericOpcodeOpts(opcode_list, options):
2204
  """Processor for generic options.
2205

2206
  This function updates the given opcodes based on generic command
2207
  line options (like debug, dry-run, etc.).
2208

2209
  @param opcode_list: list of opcodes
2210
  @param options: command line options or None
2211
  @return: None (in-place modification)
2212

2213
  """
2214
  if not options:
2215
    return
2216
  for op in opcode_list:
2217
    op.debug_level = options.debug
2218
    if hasattr(options, "dry_run"):
2219
      op.dry_run = options.dry_run
2220
    if getattr(options, "priority", None) is not None:
2221
      op.priority = options.priority
2222

    
2223

    
2224
def GetClient(query=False):
2225
  """Connects to the a luxi socket and returns a client.
2226

2227
  @type query: boolean
2228
  @param query: this signifies that the client will only be
2229
      used for queries; if the build-time parameter
2230
      enable-split-queries is enabled, then the client will be
2231
      connected to the query socket instead of the masterd socket
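
  Illustrative use (C{QueryConfigValues} is a luxi call also used elsewhere in
  this module):

    cl = GetClient(query=True)
    (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
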
  """
2234
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2235
  if override_socket:
2236
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2237
      address = pathutils.MASTER_SOCKET
2238
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2239
      address = pathutils.QUERY_SOCKET
2240
    else:
2241
      address = override_socket
2242
  elif query and constants.ENABLE_SPLIT_QUERY:
2243
    address = pathutils.QUERY_SOCKET
2244
  else:
2245
    address = None
2246
  # TODO: Cache object?
2247
  try:
2248
    client = luxi.Client(address=address)
2249
  except luxi.NoMasterError:
2250
    ss = ssconf.SimpleStore()
2251

    
2252
    # Try to read ssconf file
2253
    try:
2254
      ss.GetMasterNode()
2255
    except errors.ConfigurationError:
2256
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2257
                                 " not part of a cluster",
2258
                                 errors.ECODE_INVAL)
2259

    
2260
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2261
    if master != myself:
2262
      raise errors.OpPrereqError("This is not the master node, please connect"
2263
                                 " to node '%s' and rerun the command" %
2264
                                 master, errors.ECODE_INVAL)
2265
    raise
2266
  return client
2267

    
2268

    
2269
def FormatError(err):
2270
  """Return a formatted error message for a given error.
2271

2272
  This function takes an exception instance and returns a tuple
2273
  consisting of two values: first, the recommended exit code, and
2274
  second, a string describing the error message (not
2275
  newline-terminated).
2276

2277
  """
2278
  retcode = 1
2279
  obuf = StringIO()
2280
  msg = str(err)
2281
  if isinstance(err, errors.ConfigurationError):
2282
    txt = "Corrupt configuration file: %s" % msg
2283
    logging.error(txt)
2284
    obuf.write(txt + "\n")
2285
    obuf.write("Aborting.")
2286
    retcode = 2
2287
  elif isinstance(err, errors.HooksAbort):
2288
    obuf.write("Failure: hooks execution failed:\n")
2289
    for node, script, out in err.args[0]:
2290
      if out:
2291
        obuf.write("  node: %s, script: %s, output: %s\n" %
2292
                   (node, script, out))
2293
      else:
2294
        obuf.write("  node: %s, script: %s (no output)\n" %
2295
                   (node, script))
2296
  elif isinstance(err, errors.HooksFailure):
2297
    obuf.write("Failure: hooks general failure: %s" % msg)
2298
  elif isinstance(err, errors.ResolverError):
2299
    this_host = netutils.Hostname.GetSysName()
2300
    if err.args[0] == this_host:
2301
      msg = "Failure: can't resolve my own hostname ('%s')"
2302
    else:
2303
      msg = "Failure: can't resolve hostname '%s'"
2304
    obuf.write(msg % err.args[0])
2305
  elif isinstance(err, errors.OpPrereqError):
2306
    if len(err.args) == 2:
2307
      obuf.write("Failure: prerequisites not met for this"
2308
                 " operation:\nerror type: %s, error details:\n%s" %
2309
                 (err.args[1], err.args[0]))
2310
    else:
2311
      obuf.write("Failure: prerequisites not met for this"
2312
                 " operation:\n%s" % msg)
2313
  elif isinstance(err, errors.OpExecError):
2314
    obuf.write("Failure: command execution error:\n%s" % msg)
2315
  elif isinstance(err, errors.TagError):
2316
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2317
  elif isinstance(err, errors.JobQueueDrainError):
2318
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2319
               " accept new requests\n")
2320
  elif isinstance(err, errors.JobQueueFull):
2321
    obuf.write("Failure: the job queue is full and doesn't accept new"
2322
               " job submissions until old jobs are archived\n")
2323
  elif isinstance(err, errors.TypeEnforcementError):
2324
    obuf.write("Parameter Error: %s" % msg)
2325
  elif isinstance(err, errors.ParameterError):
2326
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2327
  elif isinstance(err, luxi.NoMasterError):
2328
    if err.args[0] == pathutils.MASTER_SOCKET:
2329
      daemon = "the master daemon"
2330
    elif err.args[0] == pathutils.QUERY_SOCKET:
2331
      daemon = "the config daemon"
2332
    else:
2333
      daemon = "socket '%s'" % str(err.args[0])
2334
    obuf.write("Cannot communicate with %s.\nIs the process running"
2335
               " and listening for connections?" % daemon)
2336
  elif isinstance(err, luxi.TimeoutError):
2337
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2338
               " been submitted and will continue to run even if the call"
2339
               " timed out. Useful commands in this situation are \"gnt-job"
2340
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2341
    obuf.write(msg)
2342
  elif isinstance(err, luxi.PermissionError):
2343
    obuf.write("It seems you don't have permissions to connect to the"
2344
               " master daemon.\nPlease retry as a different user.")
2345
  elif isinstance(err, luxi.ProtocolError):
2346
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2347
               "%s" % msg)
2348
  elif isinstance(err, errors.JobLost):
2349
    obuf.write("Error checking job status: %s" % msg)
2350
  elif isinstance(err, errors.QueryFilterParseError):
2351
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2352
    obuf.write("\n".join(err.GetDetails()))
2353
  elif isinstance(err, errors.GenericError):
2354
    obuf.write("Unhandled Ganeti error: %s" % msg)
2355
  elif isinstance(err, JobSubmittedException):
2356
    obuf.write("JobID: %s\n" % err.args[0])
2357
    retcode = 0
2358
  else:
2359
    obuf.write("Unhandled exception: %s" % msg)
2360
  return retcode, obuf.getvalue().rstrip("\n")
2361

    
2362

    
2363
def GenericMain(commands, override=None, aliases=None,
2364
                env_override=frozenset()):
2365
  """Generic main function for all the gnt-* commands.
2366

2367
  @param commands: a dictionary with a special structure, see the design doc
2368
                   for command line handling.
2369
  @param override: if not None, we expect a dictionary with keys that will
2370
                   override command line options; this can be used to pass
2371
                   options from the scripts to generic functions
2372
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
2373
  @param env_override: list of environment names which are allowed to submit
2374
                       default args for commands
2375

2376
  """
2377
  # save the program name and the entire command line for later logging
2378
  if sys.argv:
2379
    binary = os.path.basename(sys.argv[0])
2380
    if not binary:
2381
      binary = sys.argv[0]
2382

    
2383
    if len(sys.argv) >= 2:
2384
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2385
    else:
2386
      logname = binary
2387

    
2388
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2389
  else:
2390
    binary = "<unknown program>"
2391
    cmdline = "<unknown>"
2392

    
2393
  if aliases is None:
2394
    aliases = {}
2395

    
2396
  try:
2397
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2398
                                       env_override)
2399
  except _ShowVersion:
2400
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2401
             constants.RELEASE_VERSION)
2402
    return constants.EXIT_SUCCESS
2403
  except _ShowUsage, err:
2404
    for line in _FormatUsage(binary, commands):
2405
      ToStdout(line)
2406

    
2407
    if err.exit_error:
2408
      return constants.EXIT_FAILURE
2409
    else:
2410
      return constants.EXIT_SUCCESS
2411
  except errors.ParameterError, err:
2412
    result, err_msg = FormatError(err)
2413
    ToStderr(err_msg)
2414
    return 1
2415

    
2416
  if func is None: # parse error
2417
    return 1
2418

    
2419
  if override is not None:
2420
    for key, val in override.iteritems():
2421
      setattr(options, key, val)
2422

    
2423
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2424
                     stderr_logging=True)
2425

    
2426
  logging.info("Command line: %s", cmdline)
2427

    
2428
  try:
2429
    result = func(options, args)
2430
  except (errors.GenericError, luxi.ProtocolError,
2431
          JobSubmittedException), err:
2432
    result, err_msg = FormatError(err)
2433
    logging.exception("Error during command processing")
2434
    ToStderr(err_msg)
2435
  except KeyboardInterrupt:
2436
    result = constants.EXIT_FAILURE
2437
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2438
             " might have been submitted and"
2439
             " will continue to run in the background.")
2440
  except IOError, err:
2441
    if err.errno == errno.EPIPE:
2442
      # our terminal went away, we'll exit
2443
      sys.exit(constants.EXIT_FAILURE)
2444
    else:
2445
      raise
2446

    
2447
  return result
2448

    
2449

    
2450
def ParseNicOption(optvalue):
2451
  """Parses the value of the --net option(s).
  """
2454
  try:
2455
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2456
  except (TypeError, ValueError), err:
2457
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2458
                               errors.ECODE_INVAL)
2459

    
2460
  nics = [{}] * nic_max
2461
  for nidx, ndict in optvalue:
2462
    nidx = int(nidx)
2463

    
2464
    if not isinstance(ndict, dict):
2465
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2466
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2467

    
2468
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2469

    
2470
    nics[nidx] = ndict
2471

    
2472
  return nics
2473

    
2474

    
2475
def GenericInstanceCreate(mode, opts, args):
2476
  """Add an instance to the cluster via either creation or import.
2477

2478
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2479
  @param opts: the command line options selected by the user
2480
  @type args: list
2481
  @param args: should contain only one element, the new instance name
2482
  @rtype: int
2483
  @return: the desired exit code
2484

2485
  """
2486
  instance = args[0]
2487

    
2488
  (pnode, snode) = SplitNodeOption(opts.node)
2489

    
2490
  hypervisor = None
2491
  hvparams = {}
2492
  if opts.hypervisor:
2493
    hypervisor, hvparams = opts.hypervisor
2494

    
2495
  if opts.nics:
2496
    nics = ParseNicOption(opts.nics)
2497
  elif opts.no_nics:
2498
    # no nics
2499
    nics = []
2500
  elif mode == constants.INSTANCE_CREATE:
2501
    # default of one nic, all auto
2502
    nics = [{}]
2503
  else:
2504
    # mode == import
2505
    nics = []
2506

    
2507
  if opts.disk_template == constants.DT_DISKLESS:
2508
    if opts.disks or opts.sd_size is not None:
2509
      raise errors.OpPrereqError("Diskless instance but disk"
2510
                                 " information passed", errors.ECODE_INVAL)
2511
    disks = []
2512
  else:
2513
    if (not opts.disks and not opts.sd_size
2514
        and mode == constants.INSTANCE_CREATE):
2515
      raise errors.OpPrereqError("No disk information specified",
2516
                                 errors.ECODE_INVAL)
2517
    if opts.disks and opts.sd_size is not None:
2518
      raise errors.OpPrereqError("Please use either the '--disk' or"
2519
                                 " '-s' option", errors.ECODE_INVAL)
2520
    if opts.sd_size is not None:
2521
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2522

    
2523
    if opts.disks:
2524
      try:
2525
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2526
      except ValueError, err:
2527
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2528
                                   errors.ECODE_INVAL)
2529
      disks = [{}] * disk_max
2530
    else:
2531
      disks = []
2532
    for didx, ddict in opts.disks:
2533
      didx = int(didx)
2534
      if not isinstance(ddict, dict):
2535
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2536
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2537
      elif constants.IDISK_SIZE in ddict:
2538
        if constants.IDISK_ADOPT in ddict:
2539
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2540
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2541
        try:
2542
          ddict[constants.IDISK_SIZE] = \
2543
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2544
        except ValueError, err:
2545
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2546
                                     (didx, err), errors.ECODE_INVAL)
2547
      elif constants.IDISK_ADOPT in ddict:
2548
        if mode == constants.INSTANCE_IMPORT:
2549
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2550
                                     " import", errors.ECODE_INVAL)
2551
        ddict[constants.IDISK_SIZE] = 0
2552
      else:
2553
        raise errors.OpPrereqError("Missing size or adoption source for"
2554
                                   " disk %d" % didx, errors.ECODE_INVAL)
2555
      disks[didx] = ddict
2556

    
2557
  if opts.tags is not None:
2558
    tags = opts.tags.split(",")
2559
  else:
2560
    tags = []
2561

    
2562
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2563
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2564

    
2565
  if mode == constants.INSTANCE_CREATE:
2566
    start = opts.start
2567
    os_type = opts.os
2568
    force_variant = opts.force_variant
2569
    src_node = None
2570
    src_path = None
2571
    no_install = opts.no_install
2572
    identify_defaults = False
2573
  elif mode == constants.INSTANCE_IMPORT:
2574
    start = False
2575
    os_type = None
2576
    force_variant = False
2577
    src_node = opts.src_node
2578
    src_path = opts.src_dir
2579
    no_install = None
2580
    identify_defaults = opts.identify_defaults
2581
  else:
2582
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2583

    
2584
  op = opcodes.OpInstanceCreate(instance_name=instance,
2585
                                disks=disks,
2586
                                disk_template=opts.disk_template,
2587
                                nics=nics,
2588
                                conflicts_check=opts.conflicts_check,
2589
                                pnode=pnode, snode=snode,
2590
                                ip_check=opts.ip_check,
2591
                                name_check=opts.name_check,
2592
                                wait_for_sync=opts.wait_for_sync,
2593
                                file_storage_dir=opts.file_storage_dir,
2594
                                file_driver=opts.file_driver,
2595
                                iallocator=opts.iallocator,
2596
                                hypervisor=hypervisor,
2597
                                hvparams=hvparams,
2598
                                beparams=opts.beparams,
2599
                                osparams=opts.osparams,
2600
                                mode=mode,
2601
                                start=start,
2602
                                os_type=os_type,
2603
                                force_variant=force_variant,
2604
                                src_node=src_node,
2605
                                src_path=src_path,
2606
                                tags=tags,
2607
                                no_install=no_install,
2608
                                identify_defaults=identify_defaults,
2609
                                ignore_ipolicy=opts.ignore_ipolicy)
2610

    
2611
  SubmitOrSend(op, opts)
2612
  return 0
2613

    
2614

    
2615
class _RunWhileClusterStoppedHelper:
2616
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2617

2618
  """
2619
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2620
    """Initializes this class.
2621

2622
    @type feedback_fn: callable
2623
    @param feedback_fn: Feedback function
2624
    @type cluster_name: string
2625
    @param cluster_name: Cluster name
2626
    @type master_node: string
2627
    @param master_node: Master node name
2628
    @type online_nodes: list
2629
    @param online_nodes: List of names of online nodes
2630

2631
    """
2632
    self.feedback_fn = feedback_fn
2633
    self.cluster_name = cluster_name
2634
    self.master_node = master_node
2635
    self.online_nodes = online_nodes
2636

    
2637
    self.ssh = ssh.SshRunner(self.cluster_name)
2638

    
2639
    self.nonmaster_nodes = [name for name in online_nodes
2640
                            if name != master_node]
2641

    
2642
    assert self.master_node not in self.nonmaster_nodes
2643

    
2644
  def _RunCmd(self, node_name, cmd):
2645
    """Runs a command on the local or a remote machine.
2646

2647
    @type node_name: string
2648
    @param node_name: Machine name
2649
    @type cmd: list
2650
    @param cmd: Command
2651

2652
    """
2653
    if node_name is None or node_name == self.master_node:
2654
      # No need to use SSH
2655
      result = utils.RunCmd(cmd)
2656
    else:
2657
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2658
                            utils.ShellQuoteArgs(cmd))
2659

    
2660
    if result.failed:
2661
      errmsg = ["Failed to run command %s" % result.cmd]
2662
      if node_name:
2663
        errmsg.append("on node %s" % node_name)
2664
      errmsg.append(": exitcode %s and error %s" %
2665
                    (result.exit_code, result.output))
2666
      raise errors.OpExecError(" ".join(errmsg))
2667

    
2668
  def Call(self, fn, *args):
2669
    """Call function while all daemons are stopped.
2670

2671
    @type fn: callable
2672
    @param fn: Function to be called
2673

2674
    """
2675
    # Pause watcher by acquiring an exclusive lock on watcher state file
2676
    self.feedback_fn("Blocking watcher")
2677
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2678
    try:
2679
      # TODO: Currently, this just blocks. There's no timeout.
2680
      # TODO: Should it be a shared lock?
2681
      watcher_block.Exclusive(blocking=True)
2682

    
2683
      # Stop master daemons, so that no new jobs can come in and all running
2684
      # ones are finished
2685
      self.feedback_fn("Stopping master daemons")
2686
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2687
      try:
2688
        # Stop daemons on all nodes
2689
        for node_name in self.online_nodes:
2690
          self.feedback_fn("Stopping daemons on %s" % node_name)
2691
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2692

    
2693
        # All daemons are shut down now
2694
        try:
2695
          return fn(self, *args)
2696
        except Exception, err:
2697
          _, errmsg = FormatError(err)
2698
          logging.exception("Caught exception")
2699
          self.feedback_fn(errmsg)
2700
          raise
2701
      finally:
2702
        # Start cluster again, master node last
2703
        for node_name in self.nonmaster_nodes + [self.master_node]:
2704
          self.feedback_fn("Starting daemons on %s" % node_name)
2705
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2706
    finally:
2707
      # Resume watcher
2708
      watcher_block.Close()
2709

    
2710

    
2711
def RunWhileClusterStopped(feedback_fn, fn, *args):
2712
  """Calls a function while all cluster daemons are stopped.
2713

2714
  @type feedback_fn: callable
2715
  @param feedback_fn: Feedback function
2716
  @type fn: callable
2717
  @param fn: Function to be called when daemons are stopped
2718

2719
  """
2720
  feedback_fn("Gathering cluster information")
2721

    
2722
  # This ensures we're running on the master daemon
2723
  cl = GetClient()
2724

    
2725
  (cluster_name, master_node) = \
2726
    cl.QueryConfigValues(["cluster_name", "master_node"])
2727

    
2728
  online_nodes = GetOnlineNodes([], cl=cl)
2729

    
2730
  # Don't keep a reference to the client. The master daemon will go away.
2731
  del cl
2732

    
2733
  assert master_node in online_nodes
2734

    
2735
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2736
                                       online_nodes).Call(fn, *args)
2737

    
2738

    
2739
def GenerateTable(headers, fields, separator, data,
2740
                  numfields=None, unitfields=None,
2741
                  units=None):
2742
  """Prints a table with headers and different fields.
2743

2744
  @type headers: dict
2745
  @param headers: dictionary mapping field names to headers for
2746
      the table
2747
  @type fields: list
2748
  @param fields: the field names corresponding to each row in
2749
      the data field
2750
  @param separator: the separator to be used; if this is None,
2751
      the default 'smart' algorithm is used which computes optimal
2752
      field width, otherwise just the separator is used between
2753
      each field
2754
  @type data: list
2755
  @param data: a list of lists, each sublist being one row to be output
2756
  @type numfields: list
2757
  @param numfields: a list with the fields that hold numeric
2758
      values and thus should be right-aligned
2759
  @type unitfields: list
2760
  @param unitfields: a list with the fields that hold numeric
2761
      values that should be formatted with the units field
2762
  @type units: string or None
2763
  @param units: the units we should use for formatting, or None for
2764
      automatic choice (human-readable for non-separator usage, otherwise
2765
      megabytes); this is a one-letter string
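
  Example (illustrative; exact spacing depends on the computed column widths):

    GenerateTable({"name": "Node", "dtotal": "DTotal"}, ["name", "dtotal"],
                  None, [["node1.example.com", 102400]],
                  numfields=["dtotal"], unitfields=["dtotal"])

  The function returns a list of formatted text lines, one per header/data
  row.
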
  """
2768
  if units is None:
2769
    if separator:
2770
      units = "m"
2771
    else:
2772
      units = "h"
2773

    
2774
  if numfields is None:
2775
    numfields = []
2776
  if unitfields is None:
2777
    unitfields = []
2778

    
2779
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2780
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2781

    
2782
  format_fields = []
2783
  for field in fields:
2784
    if headers and field not in headers:
2785
      # TODO: handle better unknown fields (either revert to old
2786
      # style of raising exception, or deal more intelligently with
2787
      # variable fields)
2788
      headers[field] = field
2789
    if separator is not None:
2790
      format_fields.append("%s")
2791
    elif numfields.Matches(field):
2792
      format_fields.append("%*s")
2793
    else:
2794
      format_fields.append("%-*s")
2795

    
2796
  if separator is None:
2797
    mlens = [0 for name in fields]
2798
    format_str = " ".join(format_fields)
2799
  else:
2800
    format_str = separator.replace("%", "%%").join(format_fields)
2801

    
2802
  for row in data:
2803
    if row is None:
2804
      continue
2805
    for idx, val in enumerate(row):
2806
      if unitfields.Matches(fields[idx]):
2807
        try:
2808
          val = int(val)
2809
        except (TypeError, ValueError):
2810
          pass
2811
        else:
2812
          val = row[idx] = utils.FormatUnit(val, units)
2813
      val = row[idx] = str(val)
2814
      if separator is None:
2815
        mlens[idx] = max(mlens[idx], len(val))
2816

    
2817
  result = []
2818
  if headers:
2819
    args = []
2820
    for idx, name in enumerate(fields):
2821
      hdr = headers[name]
2822
      if separator is None:
2823
        mlens[idx] = max(mlens[idx], len(hdr))
2824
        args.append(mlens[idx])
2825
      args.append(hdr)
2826
    result.append(format_str % tuple(args))
2827

    
2828
  if separator is None:
2829
    assert len(mlens) == len(fields)
2830

    
2831
    if fields and not numfields.Matches(fields[-1]):
2832
      mlens[-1] = 0
2833

    
2834
  for line in data:
2835
    args = []
2836
    if line is None:
2837
      line = ["-" for _ in fields]
2838
    for idx in range(len(fields)):
2839
      if separator is None:
2840
        args.append(mlens[idx])
2841
      args.append(line[idx])
2842
    result.append(format_str % tuple(args))
2843

    
2844
  return result
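
# Illustrative use of GenerateTable; the field names and data below are made
# up and not part of the module:
#
#   headers = {"name": "Node", "dfree": "DFree"}
#   data = [["node1.example.com", 10240], ["node2.example.com", 20480]]
#   for line in GenerateTable(headers, ["name", "dfree"], None, data,
#                             numfields=["dfree"], unitfields=["dfree"]):
#     ToStdout(line)
#
# With separator=None the column widths are computed automatically, "dfree"
# is right-aligned and formatted with human-readable units; passing e.g.
# separator="|" emits raw pipe-separated values instead.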


def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
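
# Illustrative use of FormatQueryResult; the query below is only an example
# and not part of the module:
#
#   response = GetClient().Query(constants.QR_NODE, ["name", "dfree"], None)
#   (status, lines) = FormatQueryResult(response, unit="h", header=True)
#   for line in lines:
#     ToStdout(line)
#
# "status" is one of QR_NORMAL, QR_INCOMPLETE or QR_UNKNOWN, depending on the
# per-field result codes recorded while formatting.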


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
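
# Illustrative use of GenericList from a "list" command implementation; the
# option names below are hypothetical:
#
#   def ListNodes(opts, args):
#     fields = ["name", "dtotal", "dfree"]
#     return GenericList(constants.QR_NODE, fields, args, opts.units,
#                        opts.separator, not opts.no_headers,
#                        verbose=opts.verbose, namefield="name")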


def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
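
# Illustrative use of FormatTable together with TableColumn; the columns and
# rows below are made up:
#
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Size", lambda value: utils.FormatUnit(value, "h"), True),
#     ]
#   rows = [["disk0", 1024], ["disk1", 4096]]
#   for line in FormatTable(rows, columns, True, None):
#     ToStdout(line)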


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
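
# Illustrative ParseTimespec results, derived from the suffix map above:
#
#   ParseTimespec("30") -> 30       (no suffix, taken as seconds)
#   ParseTimespec("2m") -> 120
#   ParseTimespec("1h") -> 3600
#   ParseTimespec("1w") -> 604800
#   ParseTimespec("h")  -> raises errors.OpPrereqError (suffix only)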


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that have been skipped, unless C{nowarn} is set.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
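
# Illustrative GetOnlineNodes calls; the node and group names are made up:
#
#   # All online node names in group "default", excluding the master:
#   names = GetOnlineNodes([], nodegroup="default", filter_master=True)
#
#   # Secondary IPs of the given nodes, silently skipping offline ones:
#   sips = GetOnlineNodes(["node1", "node2"], secondary_ips=True, nowarn=True)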


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
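
# ToStdout/ToStderr accept optional %-style arguments; the variable names in
# this sketch are illustrative only:
#
#   ToStdout("Submitted job %s", job_id)
#   ToStderr("Failure for %s: %s", name, err)
#
# A broken pipe (EPIPE) while writing makes the process exit with
# constants.EXIT_FAILURE instead of raising.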


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
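
# Illustrative JobExecutor flow; "jobs_to_run" and its contents are
# placeholders for opcodes built by the caller:
#
#   jex = JobExecutor(opts=opts)
#   for (name, ops) in jobs_to_run:     # ops: one or more opcode objects
#     jex.QueueJob(name, *ops)
#   results = jex.GetResults()          # [(success, result_or_error), ...]
#
# GetResults() submits anything still queued, polls the jobs and returns one
# (success, result) pair per queued job, in submission order.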


def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the parameters explicitly set on this object
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  indent = "  " * level

  for key in sorted(actual):
    data = actual[key]
    buf.write("%s- %s:" % (indent, key))

    if isinstance(data, dict) and data:
      buf.write("\n")
      FormatParameterDict(buf, param_dict.get(key, {}), data,
                          level=level + 1)
    else:
      val = param_dict.get(key, "default (%s)" % data)
      buf.write(" %s\n" % val)


def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the parameters explicitly set on this object
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
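
# Illustrative FormatParamsDictInfo result; the parameter names and values
# are made up:
#
#   FormatParamsDictInfo({"maxmem": 512}, {"maxmem": 512, "vcpus": 1})
#   -> {"maxmem": "512", "vcpus": "default (1)"}
#
# Values set explicitly are shown as-is, inherited ones as "default (...)".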


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
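
# Illustrative ConfirmOperation call; the instance names are made up:
#
#   if not ConfirmOperation(["inst1", "inst2"], "instances", "shutdown"):
#     return constants.EXIT_FAILURE
#
# For more than 20 names an extra "v" choice is offered so the user can view
# the affected list before answering.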


def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitIspecsFromOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                        ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
                        group_ipolicy, allowed_values):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  for key in constants.ISPECS_MINMAX_KEYS:
    ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
  ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled


  """
  ipolicy_out = objects.MakeEmptyIPolicy()
  _InitIspecsFromOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                      ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
                      group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
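
# Illustrative CreateIPolicyFromOpts call; the spec values are made up:
#
#   ipolicy = CreateIPolicyFromOpts(
#     ispecs_mem_size={"min": "128M", "max": "16G", "std": "1G"},
#     ispecs_cpu_count={"min": 1, "max": 8, "std": 1},
#     ipolicy_vcpu_ratio=4.0,
#     fill_all=True)
#
# Memory and disk sizes may carry units (parsed via utils.ParseUnit); with
# fill_all=True the remaining values are taken from the cluster defaults.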


def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should only be taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
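
# Illustrative PrintGenericInfo input and output; the data is made up:
#
#   PrintGenericInfo([
#     ("name", "instance1.example.com"),
#     ("nics", [
#       [("mac", "aa:00:00:00:00:01"), ("ip", "198.51.100.10")],
#       ]),
#     ])
#
# prints:
#
#   name: instance1.example.com
#   nics:
#     - mac: aa:00:00:00:00:01
#       ip: 198.51.100.10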