# lib/cli.py (revision da5f09ef)
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)

__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_STORAGE_TYPES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "FormatParamsDictInfo",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
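
# Illustrative note (not part of the original file): a Ganeti client script
# pairs one of the ARGS_* specifications above with its handler in a
# "commands" table, and the CLI framework then enforces the min/max argument
# counts. A sketch, with a hypothetical handler name:
#
#   commands = {
#     "info": (ShowInstanceInfo, ARGS_MANY_INSTANCES, [DEBUG_OPT],
#              "[<instance>...]", "Show information about instances"),
#     }
#
# ARGS_MANY_INSTANCES accepts any number of instance names, while
# ARGS_ONE_INSTANCE (min=1, max=1) requires exactly one.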


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
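
# For illustration (not part of the original file): with TAG_SRC_OPT defined
# below ("--from", dest="tags_source"), a tag command could be invoked as
#
#   gnt-cluster add-tags --from /tmp/extra-tags foo bar
#
# and _ExtendTags would append each line of /tmp/extra-tags to the positional
# tags "foo" and "bar"; passing "--from -" reads the extra tags from stdin.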


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  cases of tag objects (cluster, node group, node, network, instance).
  The opts argument is expected to contain a tag_type field denoting
  what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  cases of tag objects (cluster, node group, node, network, instance).
  The opts argument is expected to contain a tag_type field denoting
  what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  cases of tag objects (cluster, node group, node, network, instance).
  The opts argument is expected to contain a tag_type field denoting
  what object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """Custom optparse converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will get the value False (with the prefix stripped), keys with the
  prefix '-' will get the value None, and all other keys will get the
  value True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: {key: val, key: val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      else:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
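
# Example of the conversion done by _SplitKeyVal (illustrative, assuming the
# plain escaping rules of utils.UnescapeAndSplit):
#
#   _SplitKeyVal("-B", "memory=512,auto_balance,no_ip_check,-vcpus")
#     -> {"memory": "512", "auto_balance": True, "ip_check": False,
#         "vcpus": None}
#
# i.e. "key=val" keeps the string value, a bare key becomes True, a "no_"
# prefix becomes False, and a "-" prefix becomes None (reset to default).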


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or
         not ident[len(UN_PREFIX)][0].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest)
    retval = (ident, kv_dict)
  return retval
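
# Illustrative examples of check_ident_key_val results:
#
#   "0:size=10G,vg=xenvg"  ->  ("0", {"size": "10G", "vg": "xenvg"})
#   "no_link0"             ->  ("link0", False)
#   "-net1"                ->  ("net1", None)
#
# With action="append" (as used by e.g. DISK_PARAMS_OPT below) the option can
# be repeated, yielding a list of such tuples.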


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value)


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)


# completion_suggest is normally a list. For dynamic completion we use
# numeric values that do not evaluate to False.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
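
# Sketch of how the custom types registered above are used (MY_PARAMS_OPT is a
# hypothetical example, not an option defined in this module):
#
#   MY_PARAMS_OPT = cli_option("--my-params", dest="my_params",
#                              type="keyval", default={},
#                              help="Example key=val option")
#
# optparse then runs check_key_val on the value, so "--my-params mem=512,no_ac"
# would end up as {"mem": "512", "ac": False} in options.my_params.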


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_STORAGE_TYPES_OPT = cli_option("--enabled-storage-types",
                                       dest="enabled_storage_types",
                                       help="Comma-separated list of "
                                            "storage methods",
                                       type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing a VM-state-changing"
                             " operation")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")
1422

    
1423
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1424
                        type="bool", default=None, metavar=_YORNO,
1425
                        help="Sets the hidden flag on the OS")
1426

    
1427
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1428
                        type="bool", default=None, metavar=_YORNO,
1429
                        help="Sets the blacklisted flag on the OS")
1430

    
1431
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1432
                                     type="bool", metavar=_YORNO,
1433
                                     dest="prealloc_wipe_disks",
1434
                                     help=("Wipe disks prior to instance"
1435
                                           " creation"))
1436

    
1437
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1438
                             type="keyval", default=None,
1439
                             help="Node parameters")
1440

    
1441
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1442
                              action="store", metavar="POLICY", default=None,
1443
                              help="Allocation policy for the node group")
1444

    
1445
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1446
                              type="bool", metavar=_YORNO,
1447
                              dest="node_powered",
1448
                              help="Specify if the SoR for node is powered")
1449

    
1450
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1451
                             default=constants.OOB_TIMEOUT,
1452
                             help="Maximum time to wait for out-of-band helper")
1453

    
1454
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1455
                             default=constants.OOB_POWER_DELAY,
1456
                             help="Time in seconds to wait between power-ons")
1457

    
1458
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1459
                              action="store_true", default=False,
1460
                              help=("Whether command argument should be treated"
1461
                                    " as filter"))
1462

    
1463
NO_REMEMBER_OPT = cli_option("--no-remember",
1464
                             dest="no_remember",
1465
                             action="store_true", default=False,
1466
                             help="Perform but do not record the change"
1467
                             " in the configuration")
1468

    
1469
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1470
                              default=False, action="store_true",
1471
                              help="Evacuate primary instances only")
1472

    
1473
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1474
                                default=False, action="store_true",
1475
                                help="Evacuate secondary instances only"
1476
                                     " (applies only to internally mirrored"
1477
                                     " disk templates, e.g. %s)" %
1478
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1479

    
1480
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1481
                                action="store_true", default=False,
1482
                                help="Pause instance at startup")
1483

    
1484
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1485
                          help="Destination node group (name or uuid)",
1486
                          default=None, action="append",
1487
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1488

    
1489
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1490
                               action="append", dest="ignore_errors",
1491
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1492
                               help="Error code to be ignored")
1493

    
1494
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1495
                            action="append",
1496
                            help=("Specify disk state information in the"
1497
                                  " format"
1498
                                  " storage_type/identifier:option=value,...;"
1499
                                  " note this is unused for now"),
1500
                            type="identkeyval")
1501

    
1502
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1503
                          action="append",
1504
                          help=("Specify hypervisor state information in the"
1505
                                " format hypervisor:option=value,...;"
1506
                                " note this is unused for now"),
1507
                          type="identkeyval")
1508

    
1509
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1510
                                action="store_true", default=False,
1511
                                help="Ignore instance policy violations")
1512

    
1513
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1514
                             help="Sets the instance's runtime memory,"
1515
                             " ballooning it up or down to the new value",
1516
                             default=None, type="unit", metavar="<size>")
1517

    
1518
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1519
                          action="store_true", default=False,
1520
                          help="Marks the grow as absolute instead of the"
1521
                          " (default) relative mode")
1522

    
1523
NETWORK_OPT = cli_option("--network",
1524
                         action="store", default=None, dest="network",
1525
                         help="IP network in CIDR notation")
1526

    
1527
GATEWAY_OPT = cli_option("--gateway",
1528
                         action="store", default=None, dest="gateway",
1529
                         help="IP address of the router (gateway)")
1530

    
1531
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1532
                                  action="store", default=None,
1533
                                  dest="add_reserved_ips",
1534
                                  help="Comma-separated list of"
1535
                                  " reserved IPs to add")
1536

    
1537
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1538
                                     action="store", default=None,
1539
                                     dest="remove_reserved_ips",
1540
                                     help="Comma-delimited list of"
1541
                                     " reserved IPs to remove")
1542

    
1543
NETWORK6_OPT = cli_option("--network6",
1544
                          action="store", default=None, dest="network6",
1545
                          help="IP network in CIDR notation")
1546

    
1547
GATEWAY6_OPT = cli_option("--gateway6",
1548
                          action="store", default=None, dest="gateway6",
1549
                          help="IP6 address of the router (gateway)")
1550

    
1551
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1552
                                  dest="conflicts_check",
1553
                                  default=True,
1554
                                  action="store_false",
1555
                                  help="Don't check for conflicting IPs")
1556

    
1557
#: Options provided by all commands
1558
COMMON_OPTS = [DEBUG_OPT]
1559

    
1560
# common options for creating instances. add and import then add their own
1561
# specific ones.
1562
COMMON_CREATE_OPTS = [
1563
  BACKEND_OPT,
1564
  DISK_OPT,
1565
  DISK_TEMPLATE_OPT,
1566
  FILESTORE_DIR_OPT,
1567
  FILESTORE_DRIVER_OPT,
1568
  HYPERVISOR_OPT,
1569
  IALLOCATOR_OPT,
1570
  NET_OPT,
1571
  NODE_PLACEMENT_OPT,
1572
  NOIPCHECK_OPT,
1573
  NOCONFLICTSCHECK_OPT,
1574
  NONAMECHECK_OPT,
1575
  NONICS_OPT,
1576
  NWSYNC_OPT,
1577
  OSPARAMS_OPT,
1578
  OS_SIZE_OPT,
1579
  SUBMIT_OPT,
1580
  TAG_ADD_OPT,
1581
  DRY_RUN_OPT,
1582
  PRIORITY_OPT,
1583
  ]
1584

    
1585
# common instance policy options
1586
INSTANCE_POLICY_OPTS = [
1587
  SPECS_CPU_COUNT_OPT,
1588
  SPECS_DISK_COUNT_OPT,
1589
  SPECS_DISK_SIZE_OPT,
1590
  SPECS_MEM_SIZE_OPT,
1591
  SPECS_NIC_COUNT_OPT,
1592
  IPOLICY_DISK_TEMPLATES,
1593
  IPOLICY_VCPU_RATIO,
1594
  IPOLICY_SPINDLE_RATIO,
1595
  ]
1596

    
1597

    
1598
class _ShowUsage(Exception):
1599
  """Exception class for L{_ParseArgs}.
1600

1601
  """
1602
  def __init__(self, exit_error):
1603
    """Initializes instances of this class.
1604

1605
    @type exit_error: bool
1606
    @param exit_error: Whether to report failure on exit
1607

1608
    """
1609
    Exception.__init__(self)
1610
    self.exit_error = exit_error
1611

    
1612

    
1613
class _ShowVersion(Exception):
1614
  """Exception class for L{_ParseArgs}.
1615

1616
  """
1617

    
1618

    
1619
def _ParseArgs(binary, argv, commands, aliases, env_override):
1620
  """Parser for the command line arguments.
1621

1622
  This function parses the arguments and returns the function which
1623
  must be executed together with its (modified) arguments.
1624

1625
  @param binary: Script name
1626
  @param argv: Command line arguments
1627
  @param commands: Dictionary containing command definitions
1628
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1629
  @param env_override: list of env variables allowed for default args
1630
  @raise _ShowUsage: If usage description should be shown
1631
  @raise _ShowVersion: If version should be shown
1632

1633
  """
1634
  assert not (env_override - set(commands))
1635
  assert not (set(aliases.keys()) & set(commands.keys()))
1636

    
1637
  if len(argv) > 1:
1638
    cmd = argv[1]
1639
  else:
1640
    # No option or command given
1641
    raise _ShowUsage(exit_error=True)
1642

    
1643
  if cmd == "--version":
1644
    raise _ShowVersion()
1645
  elif cmd == "--help":
1646
    raise _ShowUsage(exit_error=False)
1647
  elif not (cmd in commands or cmd in aliases):
1648
    raise _ShowUsage(exit_error=True)
1649

    
1650
  # get command, unalias it, and look it up in commands
1651
  if cmd in aliases:
1652
    if aliases[cmd] not in commands:
1653
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1654
                                   " command '%s'" % (cmd, aliases[cmd]))
1655

    
1656
    cmd = aliases[cmd]
1657

    
1658
  if cmd in env_override:
1659
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1660
    env_args = os.environ.get(args_env_name)
1661
    if env_args:
1662
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1663

    
1664
  func, args_def, parser_opts, usage, description = commands[cmd]
1665
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1666
                        description=description,
1667
                        formatter=TitledHelpFormatter(),
1668
                        usage="%%prog %s %s" % (cmd, usage))
1669
  parser.disable_interspersed_args()
1670
  options, args = parser.parse_args(args=argv[2:])
1671

    
1672
  if not _CheckArguments(cmd, args_def, args):
1673
    return None, None, None
1674

    
1675
  return func, options, args
1676

    
1677

    
1678
def _FormatUsage(binary, commands):
1679
  """Generates a nice description of all commands.
1680

1681
  @param binary: Script name
1682
  @param commands: Dictionary containing command definitions
1683

1684
  """
1685
  # compute the max line length for cmd + usage
1686
  mlen = min(60, max(map(len, commands)))
1687

    
1688
  yield "Usage: %s {command} [options...] [argument...]" % binary
1689
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1690
  yield ""
1691
  yield "Commands:"
1692

    
1693
  # and format a nice command list
1694
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1695
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1696
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1697
    for line in help_lines:
1698
      yield " %-*s   %s" % (mlen, "", line)
1699

    
1700
  yield ""
1701

    
1702

    
1703
def _CheckArguments(cmd, args_def, args):
1704
  """Verifies the arguments using the argument definition.
1705

1706
  Algorithm:
1707

1708
    1. Abort with error if values specified by user but none expected.
1709

1710
    1. For each argument in definition
1711

1712
      1. Keep running count of minimum number of values (min_count)
1713
      1. Keep running count of maximum number of values (max_count)
1714
      1. If it has an unlimited number of values
1715

1716
        1. Abort with error if it's not the last argument in the definition
1717

1718
    1. If last argument has limited number of values
1719

1720
      1. Abort with error if number of values doesn't match or is too large
1721

1722
    1. Abort with error if user didn't pass enough values (min_count)
1723

1724
  """
1725
  if args and not args_def:
1726
    ToStderr("Error: Command %s expects no arguments", cmd)
1727
    return False
1728

    
1729
  min_count = None
1730
  max_count = None
1731
  check_max = None
1732

    
1733
  last_idx = len(args_def) - 1
1734

    
1735
  for idx, arg in enumerate(args_def):
1736
    if min_count is None:
1737
      min_count = arg.min
1738
    elif arg.min is not None:
1739
      min_count += arg.min
1740

    
1741
    if max_count is None:
1742
      max_count = arg.max
1743
    elif arg.max is not None:
1744
      max_count += arg.max
1745

    
1746
    if idx == last_idx:
1747
      check_max = (arg.max is not None)
1748

    
1749
    elif arg.max is None:
1750
      raise errors.ProgrammerError("Only the last argument can have max=None")
1751

    
1752
  if check_max:
1753
    # Command with exact number of arguments
1754
    if (min_count is not None and max_count is not None and
1755
        min_count == max_count and len(args) != min_count):
1756
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1757
      return False
1758

    
1759
    # Command with limited number of arguments
1760
    if max_count is not None and len(args) > max_count:
1761
      ToStderr("Error: Command %s expects only %d argument(s)",
1762
               cmd, max_count)
1763
      return False
1764

    
1765
  # Command with some required arguments
1766
  if min_count is not None and len(args) < min_count:
1767
    ToStderr("Error: Command %s expects at least %d argument(s)",
1768
             cmd, min_count)
1769
    return False
1770

    
1771
  return True
1772

    
1773

    
1774
def SplitNodeOption(value):
1775
  """Splits the value of a --node option.
1776

1777
  """
1778
  if value and ":" in value:
1779
    return value.split(":", 1)
1780
  else:
1781
    return (value, None)
1782

    
1783

    
1784
def CalculateOSNames(os_name, os_variants):
1785
  """Calculates all the names an OS can be called, according to its variants.
1786

1787
  @type os_name: string
1788
  @param os_name: base name of the os
1789
  @type os_variants: list or None
1790
  @param os_variants: list of supported variants
1791
  @rtype: list
1792
  @return: list of valid names
1793

1794
  """
1795
  if os_variants:
1796
    return ["%s+%s" % (os_name, v) for v in os_variants]
1797
  else:
1798
    return [os_name]
1799

    
1800

    
1801
def ParseFields(selected, default):
1802
  """Parses the values of "--field"-like options.
1803

1804
  @type selected: string or None
1805
  @param selected: User-selected options
1806
  @type default: list
1807
  @param default: Default fields
1808

1809
  """
1810
  if selected is None:
1811
    return default
1812

    
1813
  if selected.startswith("+"):
1814
    return default + selected[1:].split(",")
1815

    
1816
  return selected.split(",")
1817

    
1818

    
1819
UsesRPC = rpc.RunWithRPC
1820

    
1821

    
1822
def AskUser(text, choices=None):
1823
  """Ask the user a question.
1824

1825
  @param text: the question to ask
1826

1827
  @param choices: list with elements tuples (input_char, return_value,
1828
      description); if not given, it will default to: [('y', True,
1829
      'Perform the operation'), ('n', False, 'Do no do the operation')];
1830
      note that the '?' char is reserved for help
1831

1832
  @return: one of the return values from the choices list; if input is
1833
      not possible (i.e. not running with a tty, we return the last
1834
      entry from the list
1835

1836
  """
1837
  if choices is None:
1838
    choices = [("y", True, "Perform the operation"),
1839
               ("n", False, "Do not perform the operation")]
1840
  if not choices or not isinstance(choices, list):
1841
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1842
  for entry in choices:
1843
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1844
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1845

    
1846
  answer = choices[-1][1]
1847
  new_text = []
1848
  for line in text.splitlines():
1849
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1850
  text = "\n".join(new_text)
1851
  try:
1852
    f = file("/dev/tty", "a+")
1853
  except IOError:
1854
    return answer
1855
  try:
1856
    chars = [entry[0] for entry in choices]
1857
    chars[-1] = "[%s]" % chars[-1]
1858
    chars.append("?")
1859
    maps = dict([(entry[0], entry[1]) for entry in choices])
1860
    while True:
1861
      f.write(text)
1862
      f.write("\n")
1863
      f.write("/".join(chars))
1864
      f.write(": ")
1865
      line = f.readline(2).strip().lower()
1866
      if line in maps:
1867
        answer = maps[line]
1868
        break
1869
      elif line == "?":
1870
        for entry in choices:
1871
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1872
        f.write("\n")
1873
        continue
1874
  finally:
1875
    f.close()
1876
  return answer
1877

    
1878

    
1879
class JobSubmittedException(Exception):
1880
  """Job was submitted, client should exit.
1881

1882
  This exception has one argument, the ID of the job that was
1883
  submitted. The handler should print this ID.
1884

1885
  This is not an error, just a structured way to exit from clients.
1886

1887
  """
1888

    
1889

    
1890
def SendJob(ops, cl=None):
1891
  """Function to submit an opcode without waiting for the results.
1892

1893
  @type ops: list
1894
  @param ops: list of opcodes
1895
  @type cl: luxi.Client
1896
  @param cl: the luxi client to use for communicating with the master;
1897
             if None, a new client will be created
1898

1899
  """
1900
  if cl is None:
1901
    cl = GetClient()
1902

    
1903
  job_id = cl.SubmitJob(ops)
1904

    
1905
  return job_id
1906

    
1907

    
1908
def GenericPollJob(job_id, cbs, report_cbs):
1909
  """Generic job-polling function.
1910

1911
  @type job_id: number
1912
  @param job_id: Job ID
1913
  @type cbs: Instance of L{JobPollCbBase}
1914
  @param cbs: Data callbacks
1915
  @type report_cbs: Instance of L{JobPollReportCbBase}
1916
  @param report_cbs: Reporting callbacks
1917

1918
  """
1919
  prev_job_info = None
1920
  prev_logmsg_serial = None
1921

    
1922
  status = None
1923

    
1924
  while True:
1925
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1926
                                      prev_logmsg_serial)
1927
    if not result:
1928
      # job not found, go away!
1929
      raise errors.JobLost("Job with id %s lost" % job_id)
1930

    
1931
    if result == constants.JOB_NOTCHANGED:
1932
      report_cbs.ReportNotChanged(job_id, status)
1933

    
1934
      # Wait again
1935
      continue
1936

    
1937
    # Split result, a tuple of (field values, log entries)
1938
    (job_info, log_entries) = result
1939
    (status, ) = job_info
1940

    
1941
    if log_entries:
1942
      for log_entry in log_entries:
1943
        (serial, timestamp, log_type, message) = log_entry
1944
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
1945
                                    log_type, message)
1946
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
1947

    
1948
    # TODO: Handle canceled and archived jobs
1949
    elif status in (constants.JOB_STATUS_SUCCESS,
1950
                    constants.JOB_STATUS_ERROR,
1951
                    constants.JOB_STATUS_CANCELING,
1952
                    constants.JOB_STATUS_CANCELED):
1953
      break
1954

    
1955
    prev_job_info = job_info
1956

    
1957
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1958
  if not jobs:
1959
    raise errors.JobLost("Job with id %s lost" % job_id)
1960

    
1961
  status, opstatus, result = jobs[0]
1962

    
1963
  if status == constants.JOB_STATUS_SUCCESS:
1964
    return result
1965

    
1966
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1967
    raise errors.OpExecError("Job was canceled")
1968

    
1969
  has_ok = False
1970
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
1971
    if status == constants.OP_STATUS_SUCCESS:
1972
      has_ok = True
1973
    elif status == constants.OP_STATUS_ERROR:
1974
      errors.MaybeRaise(msg)
1975

    
1976
      if has_ok:
1977
        raise errors.OpExecError("partial failure (opcode %d): %s" %
1978
                                 (idx, msg))
1979

    
1980
      raise errors.OpExecError(str(msg))
1981

    
1982
  # default failure mode
1983
  raise errors.OpExecError(result)
1984

    
1985

    
1986
class JobPollCbBase:
1987
  """Base class for L{GenericPollJob} callbacks.
1988

1989
  """
1990
  def __init__(self):
1991
    """Initializes this class.
1992

1993
    """
1994

    
1995
  def WaitForJobChangeOnce(self, job_id, fields,
1996
                           prev_job_info, prev_log_serial):
1997
    """Waits for changes on a job.
1998

1999
    """
2000
    raise NotImplementedError()
2001

    
2002
  def QueryJobs(self, job_ids, fields):
2003
    """Returns the selected fields for the selected job IDs.
2004

2005
    @type job_ids: list of numbers
2006
    @param job_ids: Job IDs
2007
    @type fields: list of strings
2008
    @param fields: Fields
2009

2010
    """
2011
    raise NotImplementedError()
2012

    
2013

    
2014
class JobPollReportCbBase:
2015
  """Base class for L{GenericPollJob} reporting callbacks.
2016

2017
  """
2018
  def __init__(self):
2019
    """Initializes this class.
2020

2021
    """
2022

    
2023
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2024
    """Handles a log message.
2025

2026
    """
2027
    raise NotImplementedError()
2028

    
2029
  def ReportNotChanged(self, job_id, status):
2030
    """Called for if a job hasn't changed in a while.
2031

2032
    @type job_id: number
2033
    @param job_id: Job ID
2034
    @type status: string or None
2035
    @param status: Job status if available
2036

2037
    """
2038
    raise NotImplementedError()
2039

    
2040

    
2041
class _LuxiJobPollCb(JobPollCbBase):
2042
  def __init__(self, cl):
2043
    """Initializes this class.
2044

2045
    """
2046
    JobPollCbBase.__init__(self)
2047
    self.cl = cl
2048

    
2049
  def WaitForJobChangeOnce(self, job_id, fields,
2050
                           prev_job_info, prev_log_serial):
2051
    """Waits for changes on a job.
2052

2053
    """
2054
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2055
                                        prev_job_info, prev_log_serial)
2056

    
2057
  def QueryJobs(self, job_ids, fields):
2058
    """Returns the selected fields for the selected job IDs.
2059

2060
    """
2061
    return self.cl.QueryJobs(job_ids, fields)
2062

    
2063

    
2064
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2065
  def __init__(self, feedback_fn):
2066
    """Initializes this class.
2067

2068
    """
2069
    JobPollReportCbBase.__init__(self)
2070

    
2071
    self.feedback_fn = feedback_fn
2072

    
2073
    assert callable(feedback_fn)
2074

    
2075
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2076
    """Handles a log message.
2077

2078
    """
2079
    self.feedback_fn((timestamp, log_type, log_msg))
2080

    
2081
  def ReportNotChanged(self, job_id, status):
2082
    """Called if a job hasn't changed in a while.
2083

2084
    """
2085
    # Ignore
2086

    
2087

    
2088
class StdioJobPollReportCb(JobPollReportCbBase):
2089
  def __init__(self):
2090
    """Initializes this class.
2091

2092
    """
2093
    JobPollReportCbBase.__init__(self)
2094

    
2095
    self.notified_queued = False
2096
    self.notified_waitlock = False
2097

    
2098
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2099
    """Handles a log message.
2100

2101
    """
2102
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2103
             FormatLogMessage(log_type, log_msg))
2104

    
2105
  def ReportNotChanged(self, job_id, status):
2106
    """Called if a job hasn't changed in a while.
2107

2108
    """
2109
    if status is None:
2110
      return
2111

    
2112
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2113
      ToStderr("Job %s is waiting in queue", job_id)
2114
      self.notified_queued = True
2115

    
2116
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2117
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2118
      self.notified_waitlock = True
2119

    
2120

    
2121
def FormatLogMessage(log_type, log_msg):
2122
  """Formats a job message according to its type.
2123

2124
  """
2125
  if log_type != constants.ELOG_MESSAGE:
2126
    log_msg = str(log_msg)
2127

    
2128
  return utils.SafeEncode(log_msg)
2129

    
2130

    
2131
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2132
  """Function to poll for the result of a job.
2133

2134
  @type job_id: job identified
2135
  @param job_id: the job to poll for results
2136
  @type cl: luxi.Client
2137
  @param cl: the luxi client to use for communicating with the master;
2138
             if None, a new client will be created
2139

2140
  """
2141
  if cl is None:
2142
    cl = GetClient()
2143

    
2144
  if reporter is None:
2145
    if feedback_fn:
2146
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2147
    else:
2148
      reporter = StdioJobPollReportCb()
2149
  elif feedback_fn:
2150
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2151

    
2152
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2153

    
2154

    
2155
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2156
  """Legacy function to submit an opcode.
2157

2158
  This is just a simple wrapper over the construction of the processor
2159
  instance. It should be extended to better handle feedback and
2160
  interaction functions.
2161

2162
  """
2163
  if cl is None:
2164
    cl = GetClient()
2165

    
2166
  SetGenericOpcodeOpts([op], opts)
2167

    
2168
  job_id = SendJob([op], cl=cl)
2169

    
2170
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2171
                       reporter=reporter)
2172

    
2173
  return op_results[0]
2174

    
2175

    
2176
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2177
  """Wrapper around SubmitOpCode or SendJob.
2178

2179
  This function will decide, based on the 'opts' parameter, whether to
2180
  submit and wait for the result of the opcode (and return it), or
2181
  whether to just send the job and print its identifier. It is used in
2182
  order to simplify the implementation of the '--submit' option.
2183

2184
  It will also process the opcodes if we're sending the via SendJob
2185
  (otherwise SubmitOpCode does it).
2186

2187
  """
2188
  if opts and opts.submit_only:
2189
    job = [op]
2190
    SetGenericOpcodeOpts(job, opts)
2191
    job_id = SendJob(job, cl=cl)
2192
    raise JobSubmittedException(job_id)
2193
  else:
2194
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2195

    
2196

    
2197
def SetGenericOpcodeOpts(opcode_list, options):
2198
  """Processor for generic options.
2199

2200
  This function updates the given opcodes based on generic command
2201
  line options (like debug, dry-run, etc.).
2202

2203
  @param opcode_list: list of opcodes
2204
  @param options: command line options or None
2205
  @return: None (in-place modification)
2206

2207
  """
2208
  if not options:
2209
    return
2210
  for op in opcode_list:
2211
    op.debug_level = options.debug
2212
    if hasattr(options, "dry_run"):
2213
      op.dry_run = options.dry_run
2214
    if getattr(options, "priority", None) is not None:
2215
      op.priority = options.priority
2216

    
2217

    
2218
def GetClient(query=False):
2219
  """Connects to the a luxi socket and returns a client.
2220

2221
  @type query: boolean
2222
  @param query: this signifies that the client will only be
2223
      used for queries; if the build-time parameter
2224
      enable-split-queries is enabled, then the client will be
2225
      connected to the query socket instead of the masterd socket
2226

2227
  """
2228
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2229
  if override_socket:
2230
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2231
      address = pathutils.MASTER_SOCKET
2232
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2233
      address = pathutils.QUERY_SOCKET
2234
    else:
2235
      address = override_socket
2236
  elif query and constants.ENABLE_SPLIT_QUERY:
2237
    address = pathutils.QUERY_SOCKET
2238
  else:
2239
    address = None
2240
  # TODO: Cache object?
2241
  try:
2242
    client = luxi.Client(address=address)
2243
  except luxi.NoMasterError:
2244
    ss = ssconf.SimpleStore()
2245

    
2246
    # Try to read ssconf file
2247
    try:
2248
      ss.GetMasterNode()
2249
    except errors.ConfigurationError:
2250
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2251
                                 " not part of a cluster",
2252
                                 errors.ECODE_INVAL)
2253

    
2254
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2255
    if master != myself:
2256
      raise errors.OpPrereqError("This is not the master node, please connect"
2257
                                 " to node '%s' and rerun the command" %
2258
                                 master, errors.ECODE_INVAL)
2259
    raise
2260
  return client
2261

    
2262

    
2263
def FormatError(err):
2264
  """Return a formatted error message for a given error.
2265

2266
  This function takes an exception instance and returns a tuple
2267
  consisting of two values: first, the recommended exit code, and
2268
  second, a string describing the error message (not
2269
  newline-terminated).
2270

2271
  """
2272
  retcode = 1
2273
  obuf = StringIO()
2274
  msg = str(err)
2275
  if isinstance(err, errors.ConfigurationError):
2276
    txt = "Corrupt configuration file: %s" % msg
2277
    logging.error(txt)
2278
    obuf.write(txt + "\n")
2279
    obuf.write("Aborting.")
2280
    retcode = 2
2281
  elif isinstance(err, errors.HooksAbort):
2282
    obuf.write("Failure: hooks execution failed:\n")
2283
    for node, script, out in err.args[0]:
2284
      if out:
2285
        obuf.write("  node: %s, script: %s, output: %s\n" %
2286
                   (node, script, out))
2287
      else:
2288
        obuf.write("  node: %s, script: %s (no output)\n" %
2289
                   (node, script))
2290
  elif isinstance(err, errors.HooksFailure):
2291
    obuf.write("Failure: hooks general failure: %s" % msg)
2292
  elif isinstance(err, errors.ResolverError):
2293
    this_host = netutils.Hostname.GetSysName()
2294
    if err.args[0] == this_host:
2295
      msg = "Failure: can't resolve my own hostname ('%s')"
2296
    else:
2297
      msg = "Failure: can't resolve hostname '%s'"
2298
    obuf.write(msg % err.args[0])
2299
  elif isinstance(err, errors.OpPrereqError):
2300
    if len(err.args) == 2:
2301
      obuf.write("Failure: prerequisites not met for this"
2302
                 " operation:\nerror type: %s, error details:\n%s" %
2303
                 (err.args[1], err.args[0]))
2304
    else:
2305
      obuf.write("Failure: prerequisites not met for this"
2306
                 " operation:\n%s" % msg)
2307
  elif isinstance(err, errors.OpExecError):
2308
    obuf.write("Failure: command execution error:\n%s" % msg)
2309
  elif isinstance(err, errors.TagError):
2310
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2311
  elif isinstance(err, errors.JobQueueDrainError):
2312
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2313
               " accept new requests\n")
2314
  elif isinstance(err, errors.JobQueueFull):
2315
    obuf.write("Failure: the job queue is full and doesn't accept new"
2316
               " job submissions until old jobs are archived\n")
2317
  elif isinstance(err, errors.TypeEnforcementError):
2318
    obuf.write("Parameter Error: %s" % msg)
2319
  elif isinstance(err, errors.ParameterError):
2320
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2321
  elif isinstance(err, luxi.NoMasterError):
2322
    if err.args[0] == pathutils.MASTER_SOCKET:
2323
      daemon = "the master daemon"
2324
    elif err.args[0] == pathutils.QUERY_SOCKET:
2325
      daemon = "the config daemon"
2326
    else:
2327
      daemon = "socket '%s'" % str(err.args[0])
2328
    obuf.write("Cannot communicate with %s.\nIs the process running"
2329
               " and listening for connections?" % daemon)
2330
  elif isinstance(err, luxi.TimeoutError):
2331
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2332
               " been submitted and will continue to run even if the call"
2333
               " timed out. Useful commands in this situation are \"gnt-job"
2334
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2335
    obuf.write(msg)
2336
  elif isinstance(err, luxi.PermissionError):
2337
    obuf.write("It seems you don't have permissions to connect to the"
2338
               " master daemon.\nPlease retry as a different user.")
2339
  elif isinstance(err, luxi.ProtocolError):
2340
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2341
               "%s" % msg)
2342
  elif isinstance(err, errors.JobLost):
2343
    obuf.write("Error checking job status: %s" % msg)
2344
  elif isinstance(err, errors.QueryFilterParseError):
2345
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2346
    obuf.write("\n".join(err.GetDetails()))
2347
  elif isinstance(err, errors.GenericError):
2348
    obuf.write("Unhandled Ganeti error: %s" % msg)
2349
  elif isinstance(err, JobSubmittedException):
2350
    obuf.write("JobID: %s\n" % err.args[0])
2351
    retcode = 0
2352
  else:
2353
    obuf.write("Unhandled exception: %s" % msg)
2354
  return retcode, obuf.getvalue().rstrip("\n")
2355

    
2356

    
2357
def GenericMain(commands, override=None, aliases=None,
2358
                env_override=frozenset()):
2359
  """Generic main function for all the gnt-* commands.
2360

2361
  @param commands: a dictionary with a special structure, see the design doc
2362
                   for command line handling.
2363
  @param override: if not None, we expect a dictionary with keys that will
2364
                   override command line options; this can be used to pass
2365
                   options from the scripts to generic functions
2366
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
2367
  @param env_override: list of environment names which are allowed to submit
2368
                       default args for commands
2369

2370
  """
2371
  # save the program name and the entire command line for later logging
2372
  if sys.argv:
2373
    binary = os.path.basename(sys.argv[0])
2374
    if not binary:
2375
      binary = sys.argv[0]
2376

    
2377
    if len(sys.argv) >= 2:
2378
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2379
    else:
2380
      logname = binary
2381

    
2382
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2383
  else:
2384
    binary = "<unknown program>"
2385
    cmdline = "<unknown>"
2386

    
2387
  if aliases is None:
2388
    aliases = {}
2389

    
2390
  try:
2391
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2392
                                       env_override)
2393
  except _ShowVersion:
2394
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2395
             constants.RELEASE_VERSION)
2396
    return constants.EXIT_SUCCESS
2397
  except _ShowUsage, err:
2398
    for line in _FormatUsage(binary, commands):
2399
      ToStdout(line)
2400

    
2401
    if err.exit_error:
2402
      return constants.EXIT_FAILURE
2403
    else:
2404
      return constants.EXIT_SUCCESS
2405
  except errors.ParameterError, err:
2406
    result, err_msg = FormatError(err)
2407
    ToStderr(err_msg)
2408
    return 1
2409

    
2410
  if func is None: # parse error
2411
    return 1
2412

    
2413
  if override is not None:
2414
    for key, val in override.iteritems():
2415
      setattr(options, key, val)
2416

    
2417
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2418
                     stderr_logging=True)
2419

    
2420
  logging.info("Command line: %s", cmdline)
2421

    
2422
  try:
2423
    result = func(options, args)
2424
  except (errors.GenericError, luxi.ProtocolError,
2425
          JobSubmittedException), err:
2426
    result, err_msg = FormatError(err)
2427
    logging.exception("Error during command processing")
2428
    ToStderr(err_msg)
2429
  except KeyboardInterrupt:
2430
    result = constants.EXIT_FAILURE
2431
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2432
             " might have been submitted and"
2433
             " will continue to run in the background.")
2434
  except IOError, err:
2435
    if err.errno == errno.EPIPE:
2436
      # our terminal went away, we'll exit
2437
      sys.exit(constants.EXIT_FAILURE)
2438
    else:
2439
      raise
2440

    
2441
  return result
2442

    
2443

    
2444
def ParseNicOption(optvalue):
2445
  """Parses the value of the --net option(s).
2446

2447
  """
2448
  try:
2449
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2450
  except (TypeError, ValueError), err:
2451
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2452
                               errors.ECODE_INVAL)
2453

    
2454
  nics = [{}] * nic_max
2455
  for nidx, ndict in optvalue:
2456
    nidx = int(nidx)
2457

    
2458
    if not isinstance(ndict, dict):
2459
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2460
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2461

    
2462
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2463

    
2464
    nics[nidx] = ndict
2465

    
2466
  return nics
2467

    
2468

    
2469
def GenericInstanceCreate(mode, opts, args):
2470
  """Add an instance to the cluster via either creation or import.
2471

2472
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2473
  @param opts: the command line options selected by the user
2474
  @type args: list
2475
  @param args: should contain only one element, the new instance name
2476
  @rtype: int
2477
  @return: the desired exit code
2478

2479
  """
2480
  instance = args[0]
2481

    
2482
  (pnode, snode) = SplitNodeOption(opts.node)
2483

    
2484
  hypervisor = None
2485
  hvparams = {}
2486
  if opts.hypervisor:
2487
    hypervisor, hvparams = opts.hypervisor
2488

    
2489
  if opts.nics:
2490
    nics = ParseNicOption(opts.nics)
2491
  elif opts.no_nics:
2492
    # no nics
2493
    nics = []
2494
  elif mode == constants.INSTANCE_CREATE:
2495
    # default of one nic, all auto
2496
    nics = [{}]
2497
  else:
2498
    # mode == import
2499
    nics = []
2500

    
2501
  if opts.disk_template == constants.DT_DISKLESS:
2502
    if opts.disks or opts.sd_size is not None:
2503
      raise errors.OpPrereqError("Diskless instance but disk"
2504
                                 " information passed", errors.ECODE_INVAL)
2505
    disks = []
2506
  else:
2507
    if (not opts.disks and not opts.sd_size
2508
        and mode == constants.INSTANCE_CREATE):
2509
      raise errors.OpPrereqError("No disk information specified",
2510
                                 errors.ECODE_INVAL)
2511
    if opts.disks and opts.sd_size is not None:
2512
      raise errors.OpPrereqError("Please use either the '--disk' or"
2513
                                 " '-s' option", errors.ECODE_INVAL)
2514
    if opts.sd_size is not None:
2515
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2516

    
2517
    if opts.disks:
2518
      try:
2519
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2520
      except ValueError, err:
2521
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2522
                                   errors.ECODE_INVAL)
2523
      disks = [{}] * disk_max
2524
    else:
2525
      disks = []
2526
    for didx, ddict in opts.disks:
2527
      didx = int(didx)
2528
      if not isinstance(ddict, dict):
2529
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2530
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2531
      elif constants.IDISK_SIZE in ddict:
2532
        if constants.IDISK_ADOPT in ddict:
2533
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2534
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2535
        try:
2536
          ddict[constants.IDISK_SIZE] = \
2537
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2538
        except ValueError, err:
2539
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2540
                                     (didx, err), errors.ECODE_INVAL)
2541
      elif constants.IDISK_ADOPT in ddict:
2542
        if mode == constants.INSTANCE_IMPORT:
2543
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2544
                                     " import", errors.ECODE_INVAL)
2545
        ddict[constants.IDISK_SIZE] = 0
2546
      else:
2547
        raise errors.OpPrereqError("Missing size or adoption source for"
2548
                                   " disk %d" % didx, errors.ECODE_INVAL)
2549
      disks[didx] = ddict
2550

    
2551
  if opts.tags is not None:
2552
    tags = opts.tags.split(",")
2553
  else:
2554
    tags = []
2555

    
2556
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2557
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2558

    
2559
  if mode == constants.INSTANCE_CREATE:
2560
    start = opts.start
2561
    os_type = opts.os
2562
    force_variant = opts.force_variant
2563
    src_node = None
2564
    src_path = None
2565
    no_install = opts.no_install
2566
    identify_defaults = False
2567
  elif mode == constants.INSTANCE_IMPORT:
2568
    start = False
2569
    os_type = None
2570
    force_variant = False
2571
    src_node = opts.src_node
2572
    src_path = opts.src_dir
2573
    no_install = None
2574
    identify_defaults = opts.identify_defaults
2575
  else:
2576
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2577

    
2578
  op = opcodes.OpInstanceCreate(instance_name=instance,
2579
                                disks=disks,
2580
                                disk_template=opts.disk_template,
2581
                                nics=nics,
2582
                                conflicts_check=opts.conflicts_check,
2583
                                pnode=pnode, snode=snode,
2584
                                ip_check=opts.ip_check,
2585
                                name_check=opts.name_check,
2586
                                wait_for_sync=opts.wait_for_sync,
2587
                                file_storage_dir=opts.file_storage_dir,
2588
                                file_driver=opts.file_driver,
2589
                                iallocator=opts.iallocator,
2590
                                hypervisor=hypervisor,
2591
                                hvparams=hvparams,
2592
                                beparams=opts.beparams,
2593
                                osparams=opts.osparams,
2594
                                mode=mode,
2595
                                start=start,
2596
                                os_type=os_type,
2597
                                force_variant=force_variant,
2598
                                src_node=src_node,
2599
                                src_path=src_path,
2600
                                tags=tags,
2601
                                no_install=no_install,
2602
                                identify_defaults=identify_defaults,
2603
                                ignore_ipolicy=opts.ignore_ipolicy)
2604

    
2605
  SubmitOrSend(op, opts)
2606
  return 0
2607

    
2608

    
2609
class _RunWhileClusterStoppedHelper:
2610
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2611

2612
  """
2613
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2614
    """Initializes this class.
2615

2616
    @type feedback_fn: callable
2617
    @param feedback_fn: Feedback function
2618
    @type cluster_name: string
2619
    @param cluster_name: Cluster name
2620
    @type master_node: string
2621
    @param master_node Master node name
2622
    @type online_nodes: list
2623
    @param online_nodes: List of names of online nodes
2624

2625
    """
2626
    self.feedback_fn = feedback_fn
2627
    self.cluster_name = cluster_name
2628
    self.master_node = master_node
2629
    self.online_nodes = online_nodes
2630

    
2631
    self.ssh = ssh.SshRunner(self.cluster_name)
2632

    
2633
    self.nonmaster_nodes = [name for name in online_nodes
2634
                            if name != master_node]
2635

    
2636
    assert self.master_node not in self.nonmaster_nodes
2637

    
2638
  def _RunCmd(self, node_name, cmd):
2639
    """Runs a command on the local or a remote machine.
2640

2641
    @type node_name: string
2642
    @param node_name: Machine name
2643
    @type cmd: list
2644
    @param cmd: Command
2645

2646
    """
2647
    if node_name is None or node_name == self.master_node:
2648
      # No need to use SSH
2649
      result = utils.RunCmd(cmd)
2650
    else:
2651
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2652
                            utils.ShellQuoteArgs(cmd))
2653

    
2654
    if result.failed:
2655
      errmsg = ["Failed to run command %s" % result.cmd]
2656
      if node_name:
2657
        errmsg.append("on node %s" % node_name)
2658
      errmsg.append(": exitcode %s and error %s" %
2659
                    (result.exit_code, result.output))
2660
      raise errors.OpExecError(" ".join(errmsg))
2661

    
2662
  def Call(self, fn, *args):
2663
    """Call function while all daemons are stopped.
2664

2665
    @type fn: callable
2666
    @param fn: Function to be called
2667

2668
    """
2669
    # Pause watcher by acquiring an exclusive lock on watcher state file
2670
    self.feedback_fn("Blocking watcher")
2671
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2672
    try:
2673
      # TODO: Currently, this just blocks. There's no timeout.
2674
      # TODO: Should it be a shared lock?
2675
      watcher_block.Exclusive(blocking=True)
2676

    
2677
      # Stop master daemons, so that no new jobs can come in and all running
2678
      # ones are finished
2679
      self.feedback_fn("Stopping master daemons")
2680
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2681
      try:
2682
        # Stop daemons on all nodes
2683
        for node_name in self.online_nodes:
2684
          self.feedback_fn("Stopping daemons on %s" % node_name)
2685
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2686

    
2687
        # All daemons are shut down now
2688
        try:
2689
          return fn(self, *args)
2690
        except Exception, err:
2691
          _, errmsg = FormatError(err)
2692
          logging.exception("Caught exception")
2693
          self.feedback_fn(errmsg)
2694
          raise
2695
      finally:
2696
        # Start cluster again, master node last
2697
        for node_name in self.nonmaster_nodes + [self.master_node]:
2698
          self.feedback_fn("Starting daemons on %s" % node_name)
2699
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2700
    finally:
2701
      # Resume watcher
2702
      watcher_block.Close()
2703

    
2704

    
2705
def RunWhileClusterStopped(feedback_fn, fn, *args):
2706
  """Calls a function while all cluster daemons are stopped.
2707

2708
  @type feedback_fn: callable
2709
  @param feedback_fn: Feedback function
2710
  @type fn: callable
2711
  @param fn: Function to be called when daemons are stopped
2712

2713
  """
2714
  feedback_fn("Gathering cluster information")
2715

    
2716
  # This ensures we're running on the master daemon
2717
  cl = GetClient()
2718

    
2719
  (cluster_name, master_node) = \
2720
    cl.QueryConfigValues(["cluster_name", "master_node"])
2721

    
2722
  online_nodes = GetOnlineNodes([], cl=cl)
2723

    
2724
  # Don't keep a reference to the client. The master daemon will go away.
2725
  del cl
2726

    
2727
  assert master_node in online_nodes
2728

    
2729
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2730
                                       online_nodes).Call(fn, *args)
2731

    
2732

    
2733
def GenerateTable(headers, fields, separator, data,
2734
                  numfields=None, unitfields=None,
2735
                  units=None):
2736
  """Prints a table with headers and different fields.
2737

2738
  @type headers: dict
2739
  @param headers: dictionary mapping field names to headers for
2740
      the table
2741
  @type fields: list
2742
  @param fields: the field names corresponding to each row in
2743
      the data field
2744
  @param separator: the separator to be used; if this is None,
2745
      the default 'smart' algorithm is used which computes optimal
2746
      field width, otherwise just the separator is used between
2747
      each field
2748
  @type data: list
2749
  @param data: a list of lists, each sublist being one row to be output
2750
  @type numfields: list
2751
  @param numfields: a list with the fields that hold numeric
2752
      values and thus should be right-aligned
2753
  @type unitfields: list
2754
  @param unitfields: a list with the fields that hold numeric
2755
      values that should be formatted with the units field
2756
  @type units: string or None
2757
  @param units: the units we should use for formatting, or None for
2758
      automatic choice (human-readable for non-separator usage, otherwise
2759
      megabytes); this is a one-letter string
2760

2761
  """
2762
  if units is None:
2763
    if separator:
2764
      units = "m"
2765
    else:
2766
      units = "h"
2767

    
2768
  if numfields is None:
2769
    numfields = []
2770
  if unitfields is None:
2771
    unitfields = []
2772

    
2773
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2774
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2775

    
2776
  format_fields = []
2777
  for field in fields:
2778
    if headers and field not in headers:
2779
      # TODO: handle better unknown fields (either revert to old
2780
      # style of raising exception, or deal more intelligently with
2781
      # variable fields)
2782
      headers[field] = field
2783
    if separator is not None:
2784
      format_fields.append("%s")
2785
    elif numfields.Matches(field):
2786
      format_fields.append("%*s")
2787
    else:
2788
      format_fields.append("%-*s")
2789

    
2790
  if separator is None:
2791
    mlens = [0 for name in fields]
2792
    format_str = " ".join(format_fields)
2793
  else:
2794
    format_str = separator.replace("%", "%%").join(format_fields)
2795

    
2796
  for row in data:
2797
    if row is None:
2798
      continue
2799
    for idx, val in enumerate(row):
2800
      if unitfields.Matches(fields[idx]):
2801
        try:
2802
          val = int(val)
2803
        except (TypeError, ValueError):
2804
          pass
2805
        else:
2806
          val = row[idx] = utils.FormatUnit(val, units)
2807
      val = row[idx] = str(val)
2808
      if separator is None:
2809
        mlens[idx] = max(mlens[idx], len(val))
2810

    
2811
  result = []
2812
  if headers:
2813
    args = []
2814
    for idx, name in enumerate(fields):
2815
      hdr = headers[name]
2816
      if separator is None:
2817
        mlens[idx] = max(mlens[idx], len(hdr))
2818
        args.append(mlens[idx])
2819
      args.append(hdr)
2820
    result.append(format_str % tuple(args))
2821

    
2822
  if separator is None:
2823
    assert len(mlens) == len(fields)
2824

    
2825
    if fields and not numfields.Matches(fields[-1]):
2826
      mlens[-1] = 0
2827

    
2828
  for line in data:
2829
    args = []
2830
    if line is None:
2831
      line = ["-" for _ in fields]
2832
    for idx in range(len(fields)):
2833
      if separator is None:
2834
        args.append(mlens[idx])
2835
      args.append(line[idx])
2836
    result.append(format_str % tuple(args))
2837

    
2838
  return result
2839

    
2840
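# Illustrative sketch only (not part of the original module): a helper showing
# how GenerateTable is typically driven.  The headers, fields and data below
# are made-up values; "dtotal" is listed as both a numeric and a unit field,
# so it is right-aligned and formatted with utils.FormatUnit.
def _ExampleGenerateTable():
  """Returns a small demo table as a list of formatted lines."""
  headers = {"name": "Node", "dtotal": "DTotal"}
  fields = ["name", "dtotal"]
  data = [
    ["node1.example.com", 102400],
    ["node2.example.com", 204800],
    ]
  return GenerateTable(headers, fields, None, data,
                       numfields=["dtotal"], unitfields=["dtotal"])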

    
2841
def _FormatBool(value):
2842
  """Formats a boolean value as a string.
2843

2844
  """
2845
  if value:
2846
    return "Y"
2847
  return "N"
2848

    
2849

    
2850
#: Default formatting for query results; (callback, align right)
2851
_DEFAULT_FORMAT_QUERY = {
2852
  constants.QFT_TEXT: (str, False),
2853
  constants.QFT_BOOL: (_FormatBool, False),
2854
  constants.QFT_NUMBER: (str, True),
2855
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2856
  constants.QFT_OTHER: (str, False),
2857
  constants.QFT_UNKNOWN: (str, False),
2858
  }
2859

    
2860

    
2861
def _GetColumnFormatter(fdef, override, unit):
2862
  """Returns formatting function for a field.
2863

2864
  @type fdef: L{objects.QueryFieldDefinition}
2865
  @type override: dict
2866
  @param override: Dictionary for overriding field formatting functions,
2867
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2868
  @type unit: string
2869
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
2870
  @rtype: tuple; (callable, bool)
2871
  @return: Returns the function to format a value (takes one parameter) and a
2872
    boolean for aligning the value on the right-hand side
2873

2874
  """
2875
  fmt = override.get(fdef.name, None)
2876
  if fmt is not None:
2877
    return fmt
2878

    
2879
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
2880

    
2881
  if fdef.kind == constants.QFT_UNIT:
2882
    # Can't keep this information in the static dictionary
2883
    return (lambda value: utils.FormatUnit(value, unit), True)
2884

    
2885
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
2886
  if fmt is not None:
2887
    return fmt
2888

    
2889
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2890

    
2891
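# Illustrative sketch only (not part of the original module): the shape of a
# format_override dictionary as consumed by _GetColumnFormatter and
# FormatQueryResult.  The field names are merely examples; each entry follows
# the (callback, align right) convention of _DEFAULT_FORMAT_QUERY.
_EXAMPLE_FORMAT_OVERRIDE = {
  "ctime": (utils.FormatTime, False),
  "serial_no": (str, True),
  }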

    
2892
class _QueryColumnFormatter:
2893
  """Callable class for formatting fields of a query.
2894

2895
  """
2896
  def __init__(self, fn, status_fn, verbose):
2897
    """Initializes this class.
2898

2899
    @type fn: callable
2900
    @param fn: Formatting function
2901
    @type status_fn: callable
2902
    @param status_fn: Function to report fields' status
2903
    @type verbose: boolean
2904
    @param verbose: whether to use verbose field descriptions or not
2905

2906
    """
2907
    self._fn = fn
2908
    self._status_fn = status_fn
2909
    self._verbose = verbose
2910

    
2911
  def __call__(self, data):
2912
    """Returns a field's string representation.
2913

2914
    """
2915
    (status, value) = data
2916

    
2917
    # Report status
2918
    self._status_fn(status)
2919

    
2920
    if status == constants.RS_NORMAL:
2921
      return self._fn(value)
2922

    
2923
    assert value is None, \
2924
           "Found value %r for abnormal status %s" % (value, status)
2925

    
2926
    return FormatResultError(status, self._verbose)
2927

    
2928

    
2929
def FormatResultError(status, verbose):
2930
  """Formats result status other than L{constants.RS_NORMAL}.
2931

2932
  @param status: The result status
2933
  @type verbose: boolean
2934
  @param verbose: Whether to return the verbose text
2935
  @return: Text of result status
2936

2937
  """
2938
  assert status != constants.RS_NORMAL, \
2939
         "FormatResultError called with status equal to constants.RS_NORMAL"
2940
  try:
2941
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
2942
  except KeyError:
2943
    raise NotImplementedError("Unknown status %s" % status)
2944
  else:
2945
    if verbose:
2946
      return verbose_text
2947
    return normal_text
2948

    
2949

    
2950
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
2951
                      header=False, verbose=False):
2952
  """Formats data in L{objects.QueryResponse}.
2953

2954
  @type result: L{objects.QueryResponse}
2955
  @param result: result of query operation
2956
  @type unit: string
2957
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
2958
    see L{utils.text.FormatUnit}
2959
  @type format_override: dict
2960
  @param format_override: Dictionary for overriding field formatting functions,
2961
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2962
  @type separator: string or None
2963
  @param separator: String used to separate fields
2964
  @type header: bool
2965
  @param header: Whether to output header row
2966
  @type verbose: boolean
2967
  @param verbose: whether to use verbose field descriptions or not
2968

2969
  """
2970
  if unit is None:
2971
    if separator:
2972
      unit = "m"
2973
    else:
2974
      unit = "h"
2975

    
2976
  if format_override is None:
2977
    format_override = {}
2978

    
2979
  stats = dict.fromkeys(constants.RS_ALL, 0)
2980

    
2981
  def _RecordStatus(status):
2982
    if status in stats:
2983
      stats[status] += 1
2984

    
2985
  columns = []
2986
  for fdef in result.fields:
2987
    assert fdef.title and fdef.name
2988
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2989
    columns.append(TableColumn(fdef.title,
2990
                               _QueryColumnFormatter(fn, _RecordStatus,
2991
                                                     verbose),
2992
                               align_right))
2993

    
2994
  table = FormatTable(result.data, columns, header, separator)
2995

    
2996
  # Collect statistics
2997
  assert len(stats) == len(constants.RS_ALL)
2998
  assert compat.all(count >= 0 for count in stats.values())
2999

    
3000
  # Determine overall status. If there was no data, unknown fields must be
3001
  # detected via the field definitions.
3002
  if (stats[constants.RS_UNKNOWN] or
3003
      (not result.data and _GetUnknownFields(result.fields))):
3004
    status = QR_UNKNOWN
3005
  elif compat.any(count > 0 for key, count in stats.items()
3006
                  if key != constants.RS_NORMAL):
3007
    status = QR_INCOMPLETE
3008
  else:
3009
    status = QR_NORMAL
3010

    
3011
  return (status, table)
3012

    
3013

    
3014
def _GetUnknownFields(fdefs):
3015
  """Returns list of unknown fields included in C{fdefs}.
3016

3017
  @type fdefs: list of L{objects.QueryFieldDefinition}
3018

3019
  """
3020
  return [fdef for fdef in fdefs
3021
          if fdef.kind == constants.QFT_UNKNOWN]
3022

    
3023

    
3024
def _WarnUnknownFields(fdefs):
3025
  """Prints a warning to stderr if a query included unknown fields.
3026

3027
  @type fdefs: list of L{objects.QueryFieldDefinition}
3028

3029
  """
3030
  unknown = _GetUnknownFields(fdefs)
3031
  if unknown:
3032
    ToStderr("Warning: Queried for unknown fields %s",
3033
             utils.CommaJoin(fdef.name for fdef in unknown))
3034
    return True
3035

    
3036
  return False
3037

    
3038

    
3039
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3040
                format_override=None, verbose=False, force_filter=False,
3041
                namefield=None, qfilter=None, isnumeric=False):
3042
  """Generic implementation for listing all items of a resource.
3043

3044
  @param resource: One of L{constants.QR_VIA_LUXI}
3045
  @type fields: list of strings
3046
  @param fields: List of fields to query for
3047
  @type names: list of strings
3048
  @param names: Names of items to query for
3049
  @type unit: string or None
3050
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3051
    None for automatic choice (human-readable for non-separator usage,
3052
    otherwise megabytes); this is a one-letter string
3053
  @type separator: string or None
3054
  @param separator: String used to separate fields
3055
  @type header: bool
3056
  @param header: Whether to show header row
3057
  @type force_filter: bool
3058
  @param force_filter: Whether to always treat names as filter
3059
  @type format_override: dict
3060
  @param format_override: Dictionary for overriding field formatting functions,
3061
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3062
  @type verbose: boolean
3063
  @param verbose: whether to use verbose field descriptions or not
3064
  @type namefield: string
3065
  @param namefield: Name of field to use for simple filters (see
3066
    L{qlang.MakeFilter} for details)
3067
  @type qfilter: list or None
3068
  @param qfilter: Query filter (in addition to names)
3069
  @type isnumeric: bool
3070
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3071
    any simple filters built by namefield should use integer values to
3072
    reflect that
3073

3074
  """
3075
  if not names:
3076
    names = None
3077

    
3078
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3079
                                isnumeric=isnumeric)
3080

    
3081
  if qfilter is None:
3082
    qfilter = namefilter
3083
  elif namefilter is not None:
3084
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3085

    
3086
  if cl is None:
3087
    cl = GetClient()
3088

    
3089
  response = cl.Query(resource, fields, qfilter)
3090

    
3091
  found_unknown = _WarnUnknownFields(response.fields)
3092

    
3093
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3094
                                     header=header,
3095
                                     format_override=format_override,
3096
                                     verbose=verbose)
3097

    
3098
  for line in data:
3099
    ToStdout(line)
3100

    
3101
  assert ((found_unknown and status == QR_UNKNOWN) or
3102
          (not found_unknown and status != QR_UNKNOWN))
3103

    
3104
  if status == QR_UNKNOWN:
3105
    return constants.EXIT_UNKNOWN_FIELD
3106

    
3107
  # TODO: Should the list command fail if not all data could be collected?
3108
  return constants.EXIT_SUCCESS
3109

    
3110
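# Illustrative sketch only (not part of the original module): how a "list"
# style command would call GenericList.  It assumes a reachable master daemon
# (GetClient is used internally); the node resource and the two fields are
# just examples, not a fixed API.
def _ExampleListNodes(names):
  """Lists the given nodes (or all nodes) on stdout, returning an exit code."""
  return GenericList(constants.QR_NODE, ["name", "dtotal"], names, None,
                     None, True, namefield="name")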

    
3111
def _FieldDescValues(fdef):
3112
  """Helper function for L{GenericListFields} to get query field description.
3113

3114
  @type fdef: L{objects.QueryFieldDefinition}
3115
  @rtype: list
3116

3117
  """
3118
  return [
3119
    fdef.name,
3120
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3121
    fdef.title,
3122
    fdef.doc,
3123
    ]
3124

    
3125

    
3126
def GenericListFields(resource, fields, separator, header, cl=None):
3127
  """Generic implementation for listing fields for a resource.
3128

3129
  @param resource: One of L{constants.QR_VIA_LUXI}
3130
  @type fields: list of strings
3131
  @param fields: List of fields to query for
3132
  @type separator: string or None
3133
  @param separator: String used to separate fields
3134
  @type header: bool
3135
  @param header: Whether to show header row
3136

3137
  """
3138
  if cl is None:
3139
    cl = GetClient()
3140

    
3141
  if not fields:
3142
    fields = None
3143

    
3144
  response = cl.QueryFields(resource, fields)
3145

    
3146
  found_unknown = _WarnUnknownFields(response.fields)
3147

    
3148
  columns = [
3149
    TableColumn("Name", str, False),
3150
    TableColumn("Type", str, False),
3151
    TableColumn("Title", str, False),
3152
    TableColumn("Description", str, False),
3153
    ]
3154

    
3155
  rows = map(_FieldDescValues, response.fields)
3156

    
3157
  for line in FormatTable(rows, columns, header, separator):
3158
    ToStdout(line)
3159

    
3160
  if found_unknown:
3161
    return constants.EXIT_UNKNOWN_FIELD
3162

    
3163
  return constants.EXIT_SUCCESS
3164

    
3165
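# Illustrative sketch only (not part of the original module): the
# "list-fields" counterpart of the example above, again assuming a reachable
# master daemon.  Passing an empty field list makes GenericListFields query
# all known fields of the resource.
def _ExampleListNodeFields():
  """Prints the available node query fields, returning an exit code."""
  return GenericListFields(constants.QR_NODE, [], None, True)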

    
3166
class TableColumn:
3167
  """Describes a column for L{FormatTable}.
3168

3169
  """
3170
  def __init__(self, title, fn, align_right):
3171
    """Initializes this class.
3172

3173
    @type title: string
3174
    @param title: Column title
3175
    @type fn: callable
3176
    @param fn: Formatting function
3177
    @type align_right: bool
3178
    @param align_right: Whether to align values on the right-hand side
3179

3180
    """
3181
    self.title = title
3182
    self.format = fn
3183
    self.align_right = align_right
3184

    
3185

    
3186
def _GetColFormatString(width, align_right):
3187
  """Returns the format string for a field.
3188

3189
  """
3190
  if align_right:
3191
    sign = ""
3192
  else:
3193
    sign = "-"
3194

    
3195
  return "%%%s%ss" % (sign, width)
3196

    
3197

    
3198
def FormatTable(rows, columns, header, separator):
3199
  """Formats data as a table.
3200

3201
  @type rows: list of lists
3202
  @param rows: Row data, one list per row
3203
  @type columns: list of L{TableColumn}
3204
  @param columns: Column descriptions
3205
  @type header: bool
3206
  @param header: Whether to show header row
3207
  @type separator: string or None
3208
  @param separator: String used to separate columns
3209

3210
  """
3211
  if header:
3212
    data = [[col.title for col in columns]]
3213
    colwidth = [len(col.title) for col in columns]
3214
  else:
3215
    data = []
3216
    colwidth = [0 for _ in columns]
3217

    
3218
  # Format row data
3219
  for row in rows:
3220
    assert len(row) == len(columns)
3221

    
3222
    formatted = [col.format(value) for value, col in zip(row, columns)]
3223

    
3224
    if separator is None:
3225
      # Update column widths
3226
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3227
        # Modifying a list's items while iterating is fine
3228
        colwidth[idx] = max(oldwidth, len(value))
3229

    
3230
    data.append(formatted)
3231

    
3232
  if separator is not None:
3233
    # Return early if a separator is used
3234
    return [separator.join(row) for row in data]
3235

    
3236
  if columns and not columns[-1].align_right:
3237
    # Avoid unnecessary spaces at end of line
3238
    colwidth[-1] = 0
3239

    
3240
  # Build format string
3241
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3242
                  for col, width in zip(columns, colwidth)])
3243

    
3244
  return [fmt % tuple(row) for row in data]
3245

    
3246
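# Illustrative sketch only (not part of the original module): FormatTable
# works on already-queried values, so each column carries its own formatting
# callback.  The columns and rows below are made-up; with separator=None the
# column widths are computed automatically.
def _ExampleFormatTable():
  """Returns a two-column demo table as a list of formatted lines."""
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Free", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [
    ["node1.example.com", 51200],
    ["node2.example.com", 2048],
    ]
  return FormatTable(rows, columns, True, None)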

    
3247
def FormatTimestamp(ts):
3248
  """Formats a given timestamp.
3249

3250
  @type ts: timestamp
3251
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3252

3253
  @rtype: string
3254
  @return: a string with the formatted timestamp
3255

3256
  """
3257
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3258
    return "?"
3259

    
3260
  (sec, usecs) = ts
3261
  return utils.FormatTime(sec, usecs=usecs)
3262

    
3263
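# Illustrative sketch only (not part of the original module): FormatTimestamp
# expects a (seconds, microseconds) pair as found in job/opcode timestamps and
# falls back to "?" for anything else.  The exact output depends on the local
# timezone, hence no literal result is shown here.
def _ExampleFormatTimestamp():
  """Formats a made-up timestamp and a malformed one."""
  good = FormatTimestamp((1356998400, 500000))  # a point in time around 2013
  bad = FormatTimestamp("not-a-timestamp")      # yields "?"
  return (good, bad)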

    
3264
def ParseTimespec(value):
3265
  """Parse a time specification.
3266

3267
  The following suffixes will be recognized:
3268

3269
    - s: seconds
3270
    - m: minutes
3271
    - h: hours
3272
    - d: days
3273
    - w: weeks
3274

3275
  Without any suffix, the value will be taken to be in seconds.
3276

3277
  """
3278
  value = str(value)
3279
  if not value:
3280
    raise errors.OpPrereqError("Empty time specification passed",
3281
                               errors.ECODE_INVAL)
3282
  suffix_map = {
3283
    "s": 1,
3284
    "m": 60,
3285
    "h": 3600,
3286
    "d": 86400,
3287
    "w": 604800,
3288
    }
3289
  if value[-1] not in suffix_map:
3290
    try:
3291
      value = int(value)
3292
    except (TypeError, ValueError):
3293
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3294
                                 errors.ECODE_INVAL)
3295
  else:
3296
    multiplier = suffix_map[value[-1]]
3297
    value = value[:-1]
3298
    if not value: # no data left after stripping the suffix
3299
      raise errors.OpPrereqError("Invalid time specification (only"
3300
                                 " suffix passed)", errors.ECODE_INVAL)
3301
    try:
3302
      value = int(value) * multiplier
3303
    except (TypeError, ValueError):
3304
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3305
                                 errors.ECODE_INVAL)
3306
  return value
3307

    
3308
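# Illustrative sketch only (not part of the original module): a few concrete
# ParseTimespec conversions, useful when reading the suffix table above.  All
# results are plain integers (seconds).
def _ExampleParseTimespec():
  """Returns a dict mapping example time specifications to seconds."""
  return {
    "30": ParseTimespec("30"),    # 30 (no suffix, already seconds)
    "10m": ParseTimespec("10m"),  # 600
    "2h": ParseTimespec("2h"),    # 7200
    "1w": ParseTimespec("1w"),    # 604800
    }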

    
3309
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3310
                   filter_master=False, nodegroup=None):
3311
  """Returns the names of online nodes.
3312

3313
  This function will also log a warning on stderr with the names of
3314
  the offline nodes that are skipped.
3315

3316
  @param nodes: if not empty, use only this subset of nodes (minus the
3317
      offline ones)
3318
  @param cl: if not None, luxi client to use
3319
  @type nowarn: boolean
3320
  @param nowarn: by default, this function will output a note with the
3321
      offline nodes that are skipped; if this parameter is True the
3322
      note is not displayed
3323
  @type secondary_ips: boolean
3324
  @param secondary_ips: if True, return the secondary IPs instead of the
3325
      names, useful for doing network traffic over the replication interface
3326
      (if any)
3327
  @type filter_master: boolean
3328
  @param filter_master: if True, do not return the master node in the list
3329
      (useful in coordination with secondary_ips where we cannot check our
3330
      node name against the list)
3331
  @type nodegroup: string
3332
  @param nodegroup: If set, only return nodes in this node group
3333

3334
  """
3335
  if cl is None:
3336
    cl = GetClient()
3337

    
3338
  qfilter = []
3339

    
3340
  if nodes:
3341
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3342

    
3343
  if nodegroup is not None:
3344
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3345
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3346

    
3347
  if filter_master:
3348
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3349

    
3350
  if qfilter:
3351
    if len(qfilter) > 1:
3352
      final_filter = [qlang.OP_AND] + qfilter
3353
    else:
3354
      assert len(qfilter) == 1
3355
      final_filter = qfilter[0]
3356
  else:
3357
    final_filter = None
3358

    
3359
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3360

    
3361
  def _IsOffline(row):
3362
    (_, (_, offline), _) = row
3363
    return offline
3364

    
3365
  def _GetName(row):
3366
    ((_, name), _, _) = row
3367
    return name
3368

    
3369
  def _GetSip(row):
3370
    (_, _, (_, sip)) = row
3371
    return sip
3372

    
3373
  (offline, online) = compat.partition(result.data, _IsOffline)
3374

    
3375
  if offline and not nowarn:
3376
    ToStderr("Note: skipping offline node(s): %s" %
3377
             utils.CommaJoin(map(_GetName, offline)))
3378

    
3379
  if secondary_ips:
3380
    fn = _GetSip
3381
  else:
3382
    fn = _GetName
3383

    
3384
  return map(fn, online)
3385

    
3386
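# Illustrative sketch only (not part of the original module): typical callers
# of GetOnlineNodes, assuming a reachable master daemon.  The first call lists
# every online node; the second returns the secondary IPs of all online nodes
# except the master, which is what e.g. cluster-wide copy operations need.
def _ExampleGetOnlineNodes():
  """Returns (online node names, non-master secondary IPs)."""
  names = GetOnlineNodes([])
  sips = GetOnlineNodes([], secondary_ips=True, filter_master=True)
  return (names, sips)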

    
3387
def _ToStream(stream, txt, *args):
3388
  """Write a message to a stream, bypassing the logging system
3389

3390
  @type stream: file object
3391
  @param stream: the file to which we should write
3392
  @type txt: str
3393
  @param txt: the message
3394

3395
  """
3396
  try:
3397
    if args:
3398
      args = tuple(args)
3399
      stream.write(txt % args)
3400
    else:
3401
      stream.write(txt)
3402
    stream.write("\n")
3403
    stream.flush()
3404
  except IOError, err:
3405
    if err.errno == errno.EPIPE:
3406
      # our terminal went away, we'll exit
3407
      sys.exit(constants.EXIT_FAILURE)
3408
    else:
3409
      raise
3410

    
3411

    
3412
def ToStdout(txt, *args):
3413
  """Write a message to stdout only, bypassing the logging system
3414

3415
  This is just a wrapper over _ToStream.
3416

3417
  @type txt: str
3418
  @param txt: the message
3419

3420
  """
3421
  _ToStream(sys.stdout, txt, *args)
3422

    
3423

    
3424
def ToStderr(txt, *args):
3425
  """Write a message to stderr only, bypassing the logging system
3426

3427
  This is just a wrapper over _ToStream.
3428

3429
  @type txt: str
3430
  @param txt: the message
3431

3432
  """
3433
  _ToStream(sys.stderr, txt, *args)
3434

    
3435

    
3436
class JobExecutor(object):
3437
  """Class which manages the submission and execution of multiple jobs.
3438

3439
  Note that instances of this class should not be reused between
3440
  GetResults() calls.
3441

3442
  """
3443
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3444
    self.queue = []
3445
    if cl is None:
3446
      cl = GetClient()
3447
    self.cl = cl
3448
    self.verbose = verbose
3449
    self.jobs = []
3450
    self.opts = opts
3451
    self.feedback_fn = feedback_fn
3452
    self._counter = itertools.count()
3453

    
3454
  @staticmethod
3455
  def _IfName(name, fmt):
3456
    """Helper function for formatting name.
3457

3458
    """
3459
    if name:
3460
      return fmt % name
3461

    
3462
    return ""
3463

    
3464
  def QueueJob(self, name, *ops):
3465
    """Record a job for later submit.
3466

3467
    @type name: string
3468
    @param name: a description of the job, used when reporting its progress
        and results
3469

3470
    """
3471
    SetGenericOpcodeOpts(ops, self.opts)
3472
    self.queue.append((self._counter.next(), name, ops))
3473

    
3474
  def AddJobId(self, name, status, job_id):
3475
    """Adds a job ID to the internal queue.
3476

3477
    """
3478
    self.jobs.append((self._counter.next(), status, job_id, name))
3479

    
3480
  def SubmitPending(self, each=False):
3481
    """Submit all pending jobs.
3482

3483
    """
3484
    if each:
3485
      results = []
3486
      for (_, _, ops) in self.queue:
3487
        # SubmitJob will remove the success status, but raise an exception if
3488
        # the submission fails, so we'll notice that anyway.
3489
        results.append([True, self.cl.SubmitJob(ops)[0]])
3490
    else:
3491
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3492
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3493
      self.jobs.append((idx, status, data, name))
3494

    
3495
  def _ChooseJob(self):
3496
    """Choose a non-waiting/queued job to poll next.
3497

3498
    """
3499
    assert self.jobs, "_ChooseJob called with empty job list"
3500

    
3501
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3502
                               ["status"])
3503
    assert result
3504

    
3505
    for job_data, status in zip(self.jobs, result):
3506
      if (isinstance(status, list) and status and
3507
          status[0] in (constants.JOB_STATUS_QUEUED,
3508
                        constants.JOB_STATUS_WAITING,
3509
                        constants.JOB_STATUS_CANCELING)):
3510
        # job is still present and waiting
3511
        continue
3512
      # good candidate found (either running job or lost job)
3513
      self.jobs.remove(job_data)
3514
      return job_data
3515

    
3516
    # no job found
3517
    return self.jobs.pop(0)
3518

    
3519
  def GetResults(self):
3520
    """Wait for and return the results of all jobs.
3521

3522
    @rtype: list
3523
    @return: list of tuples (success, job results), in the same order
3524
        as the submitted jobs; if a job has failed, instead of the result
3525
        there will be the error message
3526

3527
    """
3528
    if not self.jobs:
3529
      self.SubmitPending()
3530
    results = []
3531
    if self.verbose:
3532
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3533
      if ok_jobs:
3534
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3535

    
3536
    # first, remove any non-submitted jobs
3537
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3538
    for idx, _, jid, name in failures:
3539
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3540
      results.append((idx, False, jid))
3541

    
3542
    while self.jobs:
3543
      (idx, _, jid, name) = self._ChooseJob()
3544
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3545
      try:
3546
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3547
        success = True
3548
      except errors.JobLost, err:
3549
        _, job_result = FormatError(err)
3550
        ToStderr("Job %s%s has been archived, cannot check its result",
3551
                 jid, self._IfName(name, " for %s"))
3552
        success = False
3553
      except (errors.GenericError, luxi.ProtocolError), err:
3554
        _, job_result = FormatError(err)
3555
        success = False
3556
        # the error message will always be shown, verbose or not
3557
        ToStderr("Job %s%s has failed: %s",
3558
                 jid, self._IfName(name, " for %s"), job_result)
3559

    
3560
      results.append((idx, success, job_result))
3561

    
3562
    # sort based on the index, then drop it
3563
    results.sort()
3564
    results = [i[1:] for i in results]
3565

    
3566
    return results
3567

    
3568
  def WaitOrShow(self, wait):
3569
    """Wait for job results or only print the job IDs.
3570

3571
    @type wait: boolean
3572
    @param wait: whether to wait or not
3573

3574
    """
3575
    if wait:
3576
      return self.GetResults()
3577
    else:
3578
      if not self.jobs:
3579
        self.SubmitPending()
3580
      for _, status, result, name in self.jobs:
3581
        if status:
3582
          ToStdout("%s: %s", result, name)
3583
        else:
3584
          ToStderr("Failure for %s: %s", name, result)
3585
      return [row[1:3] for row in self.jobs]
3586

    
3587
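# Illustrative sketch only (not part of the original module): the usual
# JobExecutor life cycle.  "op_list" is assumed to be a list of (name, opcodes)
# pairs prepared by the caller; each entry becomes one job.  GetResults()
# submits anything still pending, polls the jobs and returns (success, result)
# tuples in queueing order.
def _ExampleRunJobs(op_list, opts=None):
  """Submits one job per (name, ops) pair and waits for all of them."""
  jex = JobExecutor(opts=opts, verbose=True)
  for (name, ops) in op_list:
    jex.QueueJob(name, *ops)
  return jex.GetResults()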

    
3588
def FormatParameterDict(buf, param_dict, actual, level=1):
3589
  """Formats a parameter dictionary.
3590

3591
  @type buf: L{StringIO}
3592
  @param buf: the buffer into which to write
3593
  @type param_dict: dict
3594
  @param param_dict: the object's own (explicitly set) parameters
3595
  @type actual: dict
3596
  @param actual: the current parameter set (including defaults)
3597
  @param level: Level of indent
3598

3599
  """
3600
  indent = "  " * level
3601

    
3602
  for key in sorted(actual):
3603
    data = actual[key]
3604
    buf.write("%s- %s:" % (indent, key))
3605

    
3606
    if isinstance(data, dict) and data:
3607
      buf.write("\n")
3608
      FormatParameterDict(buf, param_dict.get(key, {}), data,
3609
                          level=level + 1)
3610
    else:
3611
      val = param_dict.get(key, "default (%s)" % data)
3612
      buf.write(" %s\n" % val)
3613

    
3614

    
3615
def FormatParamsDictInfo(param_dict, actual):
3616
  """Formats a parameter dictionary.
3617

3618
  @type param_dict: dict
3619
  @param param_dict: the object's own (explicitly set) parameters
3620
  @type actual: dict
3621
  @param actual: the current parameter set (including defaults)
3622
  @rtype: dict
3623
  @return: dictionary where the value of each parameter is either a fully
3624
      formatted string or a dictionary containing formatted strings
3625

3626
  """
3627
  ret = {}
3628
  for (key, data) in actual.items():
3629
    if isinstance(data, dict) and data:
3630
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
3631
    else:
3632
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
3633
  return ret
3634

    
3635

    
3636
def ConfirmOperation(names, list_type, text, extra=""):
3637
  """Ask the user to confirm an operation on a list of list_type.
3638

3639
  This function is used to request confirmation for doing an operation
3640
  on a given list of list_type.
3641

3642
  @type names: list
3643
  @param names: the list of names that we display when
3644
      we ask for confirmation
3645
  @type list_type: str
3646
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3647
  @type text: str
3648
  @param text: the operation that the user should confirm
3649
  @rtype: boolean
3650
  @return: True or False depending on user's confirmation.
3651

3652
  """
3653
  count = len(names)
3654
  msg = ("The %s will operate on %d %s.\n%s"
3655
         "Do you want to continue?" % (text, count, list_type, extra))
3656
  affected = (("\nAffected %s:\n" % list_type) +
3657
              "\n".join(["  %s" % name for name in names]))
3658

    
3659
  choices = [("y", True, "Yes, execute the %s" % text),
3660
             ("n", False, "No, abort the %s" % text)]
3661

    
3662
  if count > 20:
3663
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3664
    question = msg
3665
  else:
3666
    question = msg + affected
3667

    
3668
  choice = AskUser(question, choices)
3669
  if choice == "v":
3670
    choices.pop(1)
3671
    choice = AskUser(msg + affected, choices)
3672
  return choice
3673

    
3674

    
3675
def _MaybeParseUnit(elements):
3676
  """Parses and returns an array of potential values with units.
3677

3678
  """
3679
  parsed = {}
3680
  for k, v in elements.items():
3681
    if v == constants.VALUE_DEFAULT:
3682
      parsed[k] = v
3683
    else:
3684
      parsed[k] = utils.ParseUnit(v)
3685
  return parsed
3686

    
3687

    
3688
def _InitIspecsFromOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3689
                        ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
3690
                        group_ipolicy, allowed_values):
3691
  try:
3692
    if ispecs_mem_size:
3693
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3694
    if ispecs_disk_size:
3695
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3696
  except (TypeError, ValueError, errors.UnitParseError), err:
3697
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3698
                               " in policy: %s" %
3699
                               (ispecs_disk_size, ispecs_mem_size, err),
3700
                               errors.ECODE_INVAL)
3701

    
3702
  # prepare ipolicy dict
3703
  ispecs_transposed = {
3704
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3705
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3706
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3707
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3708
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3709
    }
3710

    
3711
  # first, check that the values given are correct
3712
  if group_ipolicy:
3713
    forced_type = TISPECS_GROUP_TYPES
3714
  else:
3715
    forced_type = TISPECS_CLUSTER_TYPES
3716
  for specs in ispecs_transposed.values():
3717
    utils.ForceDictType(specs, forced_type, allowed_values=allowed_values)
3718

    
3719
  # then transpose
3720
  ispecs = {
3721
    constants.ISPECS_MIN: {},
3722
    constants.ISPECS_MAX: {},
3723
    constants.ISPECS_STD: {},
3724
    }
3725
  for (name, specs) in ispecs_transposed.iteritems():
3726
    assert name in constants.ISPECS_PARAMETERS
3727
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3728
      assert key in ispecs
3729
      ispecs[key][name] = val
3730
  for key in constants.ISPECS_MINMAX_KEYS:
3731
    ipolicy[constants.ISPECS_MINMAX][key] = ispecs[key]
3732
  ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3733

    
3734

    
3735
def CreateIPolicyFromOpts(ispecs_mem_size=None,
3736
                          ispecs_cpu_count=None,
3737
                          ispecs_disk_count=None,
3738
                          ispecs_disk_size=None,
3739
                          ispecs_nic_count=None,
3740
                          ipolicy_disk_templates=None,
3741
                          ipolicy_vcpu_ratio=None,
3742
                          ipolicy_spindle_ratio=None,
3743
                          group_ipolicy=False,
3744
                          allowed_values=None,
3745
                          fill_all=False):
3746
  """Creation of instance policy based on command line options.
3747

3748
  @param fill_all: whether for cluster policies we should ensure that
3749
    all values are filled
3750

3751

3752
  """
3753

    
3754
  ipolicy_out = objects.MakeEmptyIPolicy()
3755
  _InitIspecsFromOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
3756
                      ispecs_disk_count, ispecs_disk_size, ispecs_nic_count,
3757
                      group_ipolicy, allowed_values)
3758

    
3759
  if ipolicy_disk_templates is not None:
3760
    ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
3761
  if ipolicy_vcpu_ratio is not None:
3762
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
3763
  if ipolicy_spindle_ratio is not None:
3764
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
3765

    
3766
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
3767

    
3768
  if not group_ipolicy and fill_all:
3769
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
3770

    
3771
  return ipolicy_out
3772

    
3773

    
3774
def _SerializeGenericInfo(buf, data, level, afterkey=False):
3775
  """Formatting core of L{PrintGenericInfo}.
3776

3777
  @param buf: (L{StringIO}) stream to accumulate the result into
3778
  @param data: data to format
3779
  @type level: int
3780
  @param level: depth in the data hierarchy, used for indenting
3781
  @type afterkey: bool
3782
  @param afterkey: True when we are in the middle of a line after a key (used
3783
      to properly add newlines or indentation)
3784

3785
  """
3786
  baseind = "  "
3787
  if isinstance(data, dict):
3788
    if not data:
3789
      buf.write("\n")
3790
    else:
3791
      if afterkey:
3792
        buf.write("\n")
3793
        doindent = True
3794
      else:
3795
        doindent = False
3796
      for key in sorted(data):
3797
        if doindent:
3798
          buf.write(baseind * level)
3799
        else:
3800
          doindent = True
3801
        buf.write(key)
3802
        buf.write(": ")
3803
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
3804
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
3805
    # list of tuples (an ordered dictionary)
3806
    if afterkey:
3807
      buf.write("\n")
3808
      doindent = True
3809
    else:
3810
      doindent = False
3811
    for (key, val) in data:
3812
      if doindent:
3813
        buf.write(baseind * level)
3814
      else:
3815
        doindent = True
3816
      buf.write(key)
3817
      buf.write(": ")
3818
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
3819
  elif isinstance(data, list):
3820
    if not data:
3821
      buf.write("\n")
3822
    else:
3823
      if afterkey:
3824
        buf.write("\n")
3825
        doindent = True
3826
      else:
3827
        doindent = False
3828
      for item in data:
3829
        if doindent:
3830
          buf.write(baseind * level)
3831
        else:
3832
          doindent = True
3833
        buf.write("-")
3834
        buf.write(baseind[1:])
3835
        _SerializeGenericInfo(buf, item, level + 1)
3836
  else:
3837
    # This branch should only be taken for strings, but it's practically
3838
    # impossible to guarantee that no other types are produced somewhere
3839
    buf.write(str(data))
3840
    buf.write("\n")
3841

    
3842

    
3843
def PrintGenericInfo(data):
3844
  """Print information formatted according to the hierarchy.
3845

3846
  The output is a valid YAML string.
3847

3848
  @param data: the data to print. It's a hierarchical structure whose elements
3849
      can be:
3850
        - dictionaries, where keys are strings and values are of any of the
3851
          types listed here
3852
        - lists of pairs (key, value), where key is a string and value is of
3853
          any of the types listed here; it's a way to encode ordered
3854
          dictionaries
3855
        - lists of any of the types listed here
3856
        - strings
3857

3858
  """
3859
  buf = StringIO()
3860
  _SerializeGenericInfo(buf, data, 0)
3861
  ToStdout(buf.getvalue().rstrip("\n"))
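# Illustrative sketch only (not part of the original module): a made-up nested
# structure of dicts, ordered key/value lists and plain lists, as accepted by
# PrintGenericInfo; the output is indented, YAML-like text on stdout.
def _ExamplePrintGenericInfo():
  """Prints a small hierarchical structure."""
  PrintGenericInfo({
    "cluster": [
      ("name", "cluster.example.com"),
      ("nodes", ["node1.example.com", "node2.example.com"]),
      ],
    "tags": [],
    })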