
root / lib / cli.py @ a8deb185


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
54
  # Command line options
55
  "ABSOLUTE_OPT",
56
  "ADD_UIDS_OPT",
57
  "ADD_RESERVED_IPS_OPT",
58
  "ALLOCATABLE_OPT",
59
  "ALLOC_POLICY_OPT",
60
  "ALL_OPT",
61
  "ALLOW_FAILOVER_OPT",
62
  "AUTO_PROMOTE_OPT",
63
  "AUTO_REPLACE_OPT",
64
  "BACKEND_OPT",
65
  "BLK_OS_OPT",
66
  "CAPAB_MASTER_OPT",
67
  "CAPAB_VM_OPT",
68
  "CLEANUP_OPT",
69
  "CLUSTER_DOMAIN_SECRET_OPT",
70
  "CONFIRM_OPT",
71
  "CP_SIZE_OPT",
72
  "DEBUG_OPT",
73
  "DEBUG_SIMERR_OPT",
74
  "DISKIDX_OPT",
75
  "DISK_OPT",
76
  "DISK_PARAMS_OPT",
77
  "DISK_TEMPLATE_OPT",
78
  "DRAINED_OPT",
79
  "DRY_RUN_OPT",
80
  "DRBD_HELPER_OPT",
81
  "DST_NODE_OPT",
82
  "EARLY_RELEASE_OPT",
83
  "ENABLED_HV_OPT",
84
  "ENABLED_DISK_TEMPLATES_OPT",
85
  "ERROR_CODES_OPT",
86
  "FAILURE_ONLY_OPT",
87
  "FIELDS_OPT",
88
  "FILESTORE_DIR_OPT",
89
  "FILESTORE_DRIVER_OPT",
90
  "FORCE_FILTER_OPT",
91
  "FORCE_OPT",
92
  "FORCE_VARIANT_OPT",
93
  "GATEWAY_OPT",
94
  "GATEWAY6_OPT",
95
  "GLOBAL_FILEDIR_OPT",
96
  "HID_OS_OPT",
97
  "GLOBAL_SHARED_FILEDIR_OPT",
98
  "HOTPLUG_OPT",
99
  "HOTPLUG_IF_POSSIBLE_OPT",
100
  "KEEPDISKS_OPT",
101
  "HVLIST_OPT",
102
  "HVOPTS_OPT",
103
  "HYPERVISOR_OPT",
104
  "IALLOCATOR_OPT",
105
  "DEFAULT_IALLOCATOR_OPT",
106
  "IDENTIFY_DEFAULTS_OPT",
107
  "IGNORE_CONSIST_OPT",
108
  "IGNORE_ERRORS_OPT",
109
  "IGNORE_FAILURES_OPT",
110
  "IGNORE_OFFLINE_OPT",
111
  "IGNORE_REMOVE_FAILURES_OPT",
112
  "IGNORE_SECONDARIES_OPT",
113
  "IGNORE_SIZE_OPT",
114
  "INCLUDEDEFAULTS_OPT",
115
  "INTERVAL_OPT",
116
  "MAC_PREFIX_OPT",
117
  "MAINTAIN_NODE_HEALTH_OPT",
118
  "MASTER_NETDEV_OPT",
119
  "MASTER_NETMASK_OPT",
120
  "MC_OPT",
121
  "MIGRATION_MODE_OPT",
122
  "MODIFY_ETCHOSTS_OPT",
123
  "NET_OPT",
124
  "NETWORK_OPT",
125
  "NETWORK6_OPT",
126
  "NEW_CLUSTER_CERT_OPT",
127
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
128
  "NEW_CONFD_HMAC_KEY_OPT",
129
  "NEW_RAPI_CERT_OPT",
130
  "NEW_PRIMARY_OPT",
131
  "NEW_SECONDARY_OPT",
132
  "NEW_SPICE_CERT_OPT",
133
  "NIC_PARAMS_OPT",
134
  "NOCONFLICTSCHECK_OPT",
135
  "NODE_FORCE_JOIN_OPT",
136
  "NODE_LIST_OPT",
137
  "NODE_PLACEMENT_OPT",
138
  "NODEGROUP_OPT",
139
  "NODE_PARAMS_OPT",
140
  "NODE_POWERED_OPT",
141
  "NODRBD_STORAGE_OPT",
142
  "NOHDR_OPT",
143
  "NOIPCHECK_OPT",
144
  "NO_INSTALL_OPT",
145
  "NONAMECHECK_OPT",
146
  "NOLVM_STORAGE_OPT",
147
  "NOMODIFY_ETCHOSTS_OPT",
148
  "NOMODIFY_SSH_SETUP_OPT",
149
  "NONICS_OPT",
150
  "NONLIVE_OPT",
151
  "NONPLUS1_OPT",
152
  "NORUNTIME_CHGS_OPT",
153
  "NOSHUTDOWN_OPT",
154
  "NOSTART_OPT",
155
  "NOSSH_KEYCHECK_OPT",
156
  "NOVOTING_OPT",
157
  "NO_REMEMBER_OPT",
158
  "NWSYNC_OPT",
159
  "OFFLINE_INST_OPT",
160
  "ONLINE_INST_OPT",
161
  "ON_PRIMARY_OPT",
162
  "ON_SECONDARY_OPT",
163
  "OFFLINE_OPT",
164
  "OSPARAMS_OPT",
165
  "OS_OPT",
166
  "OS_SIZE_OPT",
167
  "OOB_TIMEOUT_OPT",
168
  "POWER_DELAY_OPT",
169
  "PREALLOC_WIPE_DISKS_OPT",
170
  "PRIMARY_IP_VERSION_OPT",
171
  "PRIMARY_ONLY_OPT",
172
  "PRIORITY_OPT",
173
  "RAPI_CERT_OPT",
174
  "READD_OPT",
175
  "REASON_OPT",
176
  "REBOOT_TYPE_OPT",
177
  "REMOVE_INSTANCE_OPT",
178
  "REMOVE_RESERVED_IPS_OPT",
179
  "REMOVE_UIDS_OPT",
180
  "RESERVED_LVS_OPT",
181
  "RUNTIME_MEM_OPT",
182
  "ROMAN_OPT",
183
  "SECONDARY_IP_OPT",
184
  "SECONDARY_ONLY_OPT",
185
  "SELECT_OS_OPT",
186
  "SEP_OPT",
187
  "SHOWCMD_OPT",
188
  "SHOW_MACHINE_OPT",
189
  "SHUTDOWN_TIMEOUT_OPT",
190
  "SINGLE_NODE_OPT",
191
  "SPECS_CPU_COUNT_OPT",
192
  "SPECS_DISK_COUNT_OPT",
193
  "SPECS_DISK_SIZE_OPT",
194
  "SPECS_MEM_SIZE_OPT",
195
  "SPECS_NIC_COUNT_OPT",
196
  "SPLIT_ISPECS_OPTS",
197
  "IPOLICY_STD_SPECS_OPT",
198
  "IPOLICY_DISK_TEMPLATES",
199
  "IPOLICY_VCPU_RATIO",
200
  "SPICE_CACERT_OPT",
201
  "SPICE_CERT_OPT",
202
  "SRC_DIR_OPT",
203
  "SRC_NODE_OPT",
204
  "SUBMIT_OPT",
205
  "STARTUP_PAUSED_OPT",
206
  "STATIC_OPT",
207
  "SYNC_OPT",
208
  "TAG_ADD_OPT",
209
  "TAG_SRC_OPT",
210
  "TIMEOUT_OPT",
211
  "TO_GROUP_OPT",
212
  "UIDPOOL_OPT",
213
  "USEUNITS_OPT",
214
  "USE_EXTERNAL_MIP_SCRIPT",
215
  "USE_REPL_NET_OPT",
216
  "VERBOSE_OPT",
217
  "VG_NAME_OPT",
218
  "WFSYNC_OPT",
219
  "YES_DOIT_OPT",
220
  "DISK_STATE_OPT",
221
  "HV_STATE_OPT",
222
  "IGNORE_IPOLICY_OPT",
223
  "INSTANCE_POLICY_OPTS",
224
  # Generic functions for CLI programs
225
  "ConfirmOperation",
226
  "CreateIPolicyFromOpts",
227
  "GenericMain",
228
  "GenericInstanceCreate",
229
  "GenericList",
230
  "GenericListFields",
231
  "GetClient",
232
  "GetOnlineNodes",
233
  "JobExecutor",
234
  "JobSubmittedException",
235
  "ParseTimespec",
236
  "RunWhileClusterStopped",
237
  "SubmitOpCode",
238
  "SubmitOrSend",
239
  "UsesRPC",
240
  # Formatting functions
241
  "ToStderr", "ToStdout",
242
  "FormatError",
243
  "FormatQueryResult",
244
  "FormatParamsDictInfo",
245
  "FormatPolicyInfo",
246
  "PrintIPolicyCommand",
247
  "PrintGenericInfo",
248
  "GenerateTable",
249
  "AskUser",
250
  "FormatTimestamp",
251
  "FormatLogMessage",
252
  # Tags functions
253
  "ListTags",
254
  "AddTags",
255
  "RemoveTags",
256
  # command line options support infrastructure
257
  "ARGS_MANY_INSTANCES",
258
  "ARGS_MANY_NODES",
259
  "ARGS_MANY_GROUPS",
260
  "ARGS_MANY_NETWORKS",
261
  "ARGS_NONE",
262
  "ARGS_ONE_INSTANCE",
263
  "ARGS_ONE_NODE",
264
  "ARGS_ONE_GROUP",
265
  "ARGS_ONE_OS",
266
  "ARGS_ONE_NETWORK",
267
  "ArgChoice",
268
  "ArgCommand",
269
  "ArgFile",
270
  "ArgGroup",
271
  "ArgHost",
272
  "ArgInstance",
273
  "ArgJobId",
274
  "ArgNetwork",
275
  "ArgNode",
276
  "ArgOs",
277
  "ArgExtStorage",
278
  "ArgSuggest",
279
  "ArgUnknown",
280
  "OPT_COMPL_INST_ADD_NODES",
281
  "OPT_COMPL_MANY_NODES",
282
  "OPT_COMPL_ONE_IALLOCATOR",
283
  "OPT_COMPL_ONE_INSTANCE",
284
  "OPT_COMPL_ONE_NODE",
285
  "OPT_COMPL_ONE_NODEGROUP",
286
  "OPT_COMPL_ONE_NETWORK",
287
  "OPT_COMPL_ONE_OS",
288
  "OPT_COMPL_ONE_EXTSTORAGE",
289
  "cli_option",
290
  "SplitNodeOption",
291
  "CalculateOSNames",
292
  "ParseFields",
293
  "COMMON_CREATE_OPTS",
294
  ]
295

    
296
NO_PREFIX = "no_"
297
UN_PREFIX = "-"
298

    
299
#: Priorities (sorted)
300
_PRIORITY_NAMES = [
301
  ("low", constants.OP_PRIO_LOW),
302
  ("normal", constants.OP_PRIO_NORMAL),
303
  ("high", constants.OP_PRIO_HIGH),
304
  ]
305

    
306
#: Priority dictionary for easier lookup
307
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
308
# we migrate to Python 2.6
309
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
310

    
311
# Query result status for clients
312
(QR_NORMAL,
313
 QR_UNKNOWN,
314
 QR_INCOMPLETE) = range(3)
315

    
316
#: Maximum batch size for ChooseJob
317
_CHOOSE_BATCH = 25
318

    
319

    
320
# constants used to create InstancePolicy dictionary
321
TISPECS_GROUP_TYPES = {
322
  constants.ISPECS_MIN: constants.VTYPE_INT,
323
  constants.ISPECS_MAX: constants.VTYPE_INT,
324
  }
325

    
326
TISPECS_CLUSTER_TYPES = {
327
  constants.ISPECS_MIN: constants.VTYPE_INT,
328
  constants.ISPECS_MAX: constants.VTYPE_INT,
329
  constants.ISPECS_STD: constants.VTYPE_INT,
330
  }
331

    
332
#: User-friendly names for query2 field types
333
_QFT_NAMES = {
334
  constants.QFT_UNKNOWN: "Unknown",
335
  constants.QFT_TEXT: "Text",
336
  constants.QFT_BOOL: "Boolean",
337
  constants.QFT_NUMBER: "Number",
338
  constants.QFT_UNIT: "Storage size",
339
  constants.QFT_TIMESTAMP: "Timestamp",
340
  constants.QFT_OTHER: "Custom",
341
  }
342

    
343

    
344
class _Argument:
345
  def __init__(self, min=0, max=None): # pylint: disable=W0622
346
    self.min = min
347
    self.max = max
348

    
349
  def __repr__(self):
350
    return ("<%s min=%s max=%s>" %
351
            (self.__class__.__name__, self.min, self.max))
352

    
353

    
354
class ArgSuggest(_Argument):
355
  """Suggesting argument.
356

357
  Value can be any of the ones passed to the constructor.
358

359
  """
360
  # pylint: disable=W0622
361
  def __init__(self, min=0, max=None, choices=None):
362
    _Argument.__init__(self, min=min, max=max)
363
    self.choices = choices
364

    
365
  def __repr__(self):
366
    return ("<%s min=%s max=%s choices=%r>" %
367
            (self.__class__.__name__, self.min, self.max, self.choices))
368

    
369

    
370
class ArgChoice(ArgSuggest):
371
  """Choice argument.
372

373
  Like L{ArgSuggest}, but the value must be one of the choices passed to the
374
  constructor.
375

376
  """
377

    
378

    
379
class ArgUnknown(_Argument):
380
  """Unknown argument to program (e.g. determined at runtime).
381

382
  """
383

    
384

    
385
class ArgInstance(_Argument):
386
  """Instances argument.
387

388
  """
389

    
390

    
391
class ArgNode(_Argument):
392
  """Node argument.
393

394
  """
395

    
396

    
397
class ArgNetwork(_Argument):
398
  """Network argument.
399

400
  """
401

    
402

    
403
class ArgGroup(_Argument):
404
  """Node group argument.
405

406
  """
407

    
408

    
409
class ArgJobId(_Argument):
410
  """Job ID argument.
411

412
  """
413

    
414

    
415
class ArgFile(_Argument):
416
  """File path argument.
417

418
  """
419

    
420

    
421
class ArgCommand(_Argument):
422
  """Command argument.
423

424
  """
425

    
426

    
427
class ArgHost(_Argument):
428
  """Host argument.
429

430
  """
431

    
432

    
433
class ArgOs(_Argument):
434
  """OS argument.
435

436
  """
437

    
438

    
439
class ArgExtStorage(_Argument):
440
  """ExtStorage argument.
441

442
  """
443

    
444

    
445
ARGS_NONE = []
446
ARGS_MANY_INSTANCES = [ArgInstance()]
447
ARGS_MANY_NETWORKS = [ArgNetwork()]
448
ARGS_MANY_NODES = [ArgNode()]
449
ARGS_MANY_GROUPS = [ArgGroup()]
450
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
451
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
452
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
453
# TODO
454
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
455
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
456

    
457

    
458
def _ExtractTagsObject(opts, args):
459
  """Extract the tag type object.
460

461
  Note that this function will modify its args parameter.
462

463
  """
464
  if not hasattr(opts, "tag_type"):
465
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
466
  kind = opts.tag_type
467
  if kind == constants.TAG_CLUSTER:
468
    retval = kind, None
469
  elif kind in (constants.TAG_NODEGROUP,
470
                constants.TAG_NODE,
471
                constants.TAG_NETWORK,
472
                constants.TAG_INSTANCE):
473
    if not args:
474
      raise errors.OpPrereqError("no arguments passed to the command",
475
                                 errors.ECODE_INVAL)
476
    name = args.pop(0)
477
    retval = kind, name
478
  else:
479
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
480
  return retval
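
# Illustrative sketch (editor's note, not part of the original module): for a
# node-level tag command invoked e.g. as "gnt-node list-tags node1.example.com",
# opts.tag_type is constants.TAG_NODE and args is ["node1.example.com"], so
# _ExtractTagsObject(opts, args) returns (constants.TAG_NODE,
# "node1.example.com") and pops the name from args; for constants.TAG_CLUSTER
# no name is consumed and the result is (constants.TAG_CLUSTER, None).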
481

    
482

    
483
def _ExtendTags(opts, args):
484
  """Extend the args if a source file has been given.
485

486
  This function will extend the tags with the contents of the file
487
  passed in the 'tags_source' attribute of the opts parameter. A file
488
  named '-' will be replaced by stdin.
489

490
  """
491
  fname = opts.tags_source
492
  if fname is None:
493
    return
494
  if fname == "-":
495
    new_fh = sys.stdin
496
  else:
497
    new_fh = open(fname, "r")
498
  new_data = []
499
  try:
500
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
501
    # because of python bug 1633941
502
    while True:
503
      line = new_fh.readline()
504
      if not line:
505
        break
506
      new_data.append(line.strip())
507
  finally:
508
    new_fh.close()
509
  args.extend(new_data)
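
# Illustrative sketch (editor's note, not part of the original module): with
# opts.tags_source pointing to a file that contains one tag name per line
# (say "web" and "production"), _ExtendTags(opts, args) appends "web" and
# "production" to args; "-" makes it read the names from stdin instead, and
# None leaves args untouched.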
510

    
511

    
512
def ListTags(opts, args):
513
  """List the tags on a given object.
514

515
  This is a generic implementation that knows how to deal with all
516
  kinds of tag objects (cluster, node group, node, network, instance). The opts
517
  argument is expected to contain a tag_type field denoting what
518
  object type we work on.
519

520
  """
521
  kind, name = _ExtractTagsObject(opts, args)
522
  cl = GetClient(query=True)
523
  result = cl.QueryTags(kind, name)
524
  result = list(result)
525
  result.sort()
526
  for tag in result:
527
    ToStdout(tag)
528

    
529

    
530
def AddTags(opts, args):
531
  """Add tags on a given object.
532

533
  This is a generic implementation that knows how to deal with all
534
  kinds of tag objects (cluster, node group, node, network, instance). The opts
535
  argument is expected to contain a tag_type field denoting what
536
  object type we work on.
537

538
  """
539
  kind, name = _ExtractTagsObject(opts, args)
540
  _ExtendTags(opts, args)
541
  if not args:
542
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
543
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
544
  SubmitOrSend(op, opts)
545

    
546

    
547
def RemoveTags(opts, args):
548
  """Remove tags from a given object.
549

550
  This is a generic implementation that knows how to deal with all
551
  kinds of tag objects (cluster, node group, node, network, instance). The opts
552
  argument is expected to contain a tag_type field denoting what
553
  object type we work on.
554

555
  """
556
  kind, name = _ExtractTagsObject(opts, args)
557
  _ExtendTags(opts, args)
558
  if not args:
559
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
560
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
561
  SubmitOrSend(op, opts)
562

    
563

    
564
def check_unit(option, opt, value): # pylint: disable=W0613
565
  """OptParsers custom converter for units.
566

567
  """
568
  try:
569
    return utils.ParseUnit(value)
570
  except errors.UnitParseError, err:
571
    raise OptionValueError("option %s: %s" % (opt, err))
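
# Illustrative sketch (editor's note, not part of the original module):
# utils.ParseUnit treats plain numbers as MiB and understands suffixes such as
# "g", so an option declared with type="unit" turns a value like "1g" into the
# integer 1024; bad input surfaces as an optparse OptionValueError, e.g.
#   check_unit(None, "--os-size", "1g")     # -> 1024
#   check_unit(None, "--os-size", "1foo")   # -> raises OptionValueError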
572

    
573

    
574
def _SplitKeyVal(opt, data, parse_prefixes):
575
  """Convert a KeyVal string into a dict.
576

577
  This function will convert a key=val[,...] string into a dict. Empty
578
  values will be converted specially: keys which have the prefix 'no_'
579
  will have the value=False and the prefix stripped, keys with the prefix
580
  "-" will have value=None and the prefix stripped, and the others will
581
  have value=True.
582

583
  @type opt: string
584
  @param opt: a string holding the option name for which we process the
585
      data, used in building error messages
586
  @type data: string
587
  @param data: a string of the format key=val,key=val,...
588
  @type parse_prefixes: bool
589
  @param parse_prefixes: whether to handle prefixes specially
590
  @rtype: dict
591
  @return: {key=val, key=val}
592
  @raises errors.ParameterError: if there are duplicate keys
593

594
  """
595
  kv_dict = {}
596
  if data:
597
    for elem in utils.UnescapeAndSplit(data, sep=","):
598
      if "=" in elem:
599
        key, val = elem.split("=", 1)
600
      elif parse_prefixes:
601
        if elem.startswith(NO_PREFIX):
602
          key, val = elem[len(NO_PREFIX):], False
603
        elif elem.startswith(UN_PREFIX):
604
          key, val = elem[len(UN_PREFIX):], None
605
        else:
606
          key, val = elem, True
607
      else:
608
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
609
                                    (elem, opt))
610
      if key in kv_dict:
611
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
612
                                    (key, opt))
613
      kv_dict[key] = val
614
  return kv_dict
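
# Illustrative sketch (editor's note, not part of the original module):
#   _SplitKeyVal("-B", "memory=512,no_auto_balance,-vcpus", True)
# returns {"memory": "512", "auto_balance": False, "vcpus": None}: the "no_"
# prefix maps to False, the "-" prefix to None, "key=val" pairs keep their
# string value, and a bare key given on its own would map to True.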
615

    
616

    
617
def _SplitIdentKeyVal(opt, value, parse_prefixes):
618
  """Helper function to parse "ident:key=val,key=val" options.
619

620
  @type opt: string
621
  @param opt: option name, used in error messages
622
  @type value: string
623
  @param value: expected to be in the format "ident:key=val,key=val,..."
624
  @type parse_prefixes: bool
625
  @param parse_prefixes: whether to handle prefixes specially (see
626
      L{_SplitKeyVal})
627
  @rtype: tuple
628
  @return: (ident, {key=val, key=val})
629
  @raises errors.ParameterError: in case of duplicates or other parsing errors
630

631
  """
632
  if ":" not in value:
633
    ident, rest = value, ""
634
  else:
635
    ident, rest = value.split(":", 1)
636

    
637
  if parse_prefixes and ident.startswith(NO_PREFIX):
638
    if rest:
639
      msg = "Cannot pass options when removing parameter groups: %s" % value
640
      raise errors.ParameterError(msg)
641
    retval = (ident[len(NO_PREFIX):], False)
642
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
643
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
644
    if rest:
645
      msg = "Cannot pass options when removing parameter groups: %s" % value
646
      raise errors.ParameterError(msg)
647
    retval = (ident[len(UN_PREFIX):], None)
648
  else:
649
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
650
    retval = (ident, kv_dict)
651
  return retval
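
# Illustrative sketch (editor's note, not part of the original module):
#   _SplitIdentKeyVal("-D", "drbd:resync-rate=1000,metavg=xenvg", False)
# returns ("drbd", {"resync-rate": "1000", "metavg": "xenvg"}), while with
# parse_prefixes=True an identifier such as "no_drbd" (with no options after
# it) yields ("drbd", False), i.e. the whole parameter group is to be removed.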
652

    
653

    
654
def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
655
  """Custom parser for ident:key=val,key=val options.
656

657
  This will store the parsed values as a tuple (ident, {key: val}). As such,
658
  multiple uses of this option via action=append is possible.
659

660
  """
661
  return _SplitIdentKeyVal(opt, value, True)
662

    
663

    
664
def check_key_val(option, opt, value):  # pylint: disable=W0613
665
  """Custom parser class for key=val,key=val options.
666

667
  This will store the parsed values as a dict {key: val}.
668

669
  """
670
  return _SplitKeyVal(opt, value, True)
671

    
672

    
673
def _SplitListKeyVal(opt, value):
674
  retval = {}
675
  for elem in value.split("/"):
676
    if not elem:
677
      raise errors.ParameterError("Empty section in option '%s'" % opt)
678
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
679
    if ident in retval:
680
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
681
             (ident, opt, elem))
682
      raise errors.ParameterError(msg)
683
    retval[ident] = valdict
684
  return retval
685

    
686

    
687
def check_multilist_ident_key_val(_, opt, value):
688
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
689

690
  @rtype: list of dictionary
691
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
692

693
  """
694
  retval = []
695
  for line in value.split("//"):
696
    retval.append(_SplitListKeyVal(opt, line))
697
  return retval
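
# Illustrative sketch (editor's note, not part of the original module):
#   check_multilist_ident_key_val(
#       None, "--ipolicy-bounds-specs",
#       "min:nic-count=1/max:nic-count=8//min:nic-count=2")
# returns [{"min": {"nic-count": "1"}, "max": {"nic-count": "8"}},
#          {"min": {"nic-count": "2"}}]: "//" separates the entries of the
# outer list and "/" the ident:key=val groups inside each entry.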
698

    
699

    
700
def check_bool(option, opt, value): # pylint: disable=W0613
701
  """Custom parser for yes/no options.
702

703
  This will store the parsed value as either True or False.
704

705
  """
706
  value = value.lower()
707
  if value == constants.VALUE_FALSE or value == "no":
708
    return False
709
  elif value == constants.VALUE_TRUE or value == "yes":
710
    return True
711
  else:
712
    raise errors.ParameterError("Invalid boolean value '%s'" % value)
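
# Illustrative sketch (editor's note, not part of the original module):
# check_bool(None, "--vm-capable", "Yes") returns True and
# check_bool(None, "--vm-capable", "NO") returns False (matching is case
# insensitive); anything other than yes/no or the VALUE_TRUE/VALUE_FALSE
# constants raises errors.ParameterError.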
713

    
714

    
715
def check_list(option, opt, value): # pylint: disable=W0613
716
  """Custom parser for comma-separated lists.
717

718
  """
719
  # we have to make this explicit check since "".split(",") is [""],
720
  # not an empty list :(
721
  if not value:
722
    return []
723
  else:
724
    return utils.UnescapeAndSplit(value)
725

    
726

    
727
def check_maybefloat(option, opt, value): # pylint: disable=W0613
728
  """Custom parser for float numbers which might be also defaults.
729

730
  """
731
  value = value.lower()
732

    
733
  if value == constants.VALUE_DEFAULT:
734
    return value
735
  else:
736
    return float(value)
737

    
738

    
739
# completion_suggest is normally a list. For dynamic completion we use numeric
740
# values that do not evaluate to False.
741
(OPT_COMPL_MANY_NODES,
742
 OPT_COMPL_ONE_NODE,
743
 OPT_COMPL_ONE_INSTANCE,
744
 OPT_COMPL_ONE_OS,
745
 OPT_COMPL_ONE_EXTSTORAGE,
746
 OPT_COMPL_ONE_IALLOCATOR,
747
 OPT_COMPL_ONE_NETWORK,
748
 OPT_COMPL_INST_ADD_NODES,
749
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
750

    
751
OPT_COMPL_ALL = compat.UniqueFrozenset([
752
  OPT_COMPL_MANY_NODES,
753
  OPT_COMPL_ONE_NODE,
754
  OPT_COMPL_ONE_INSTANCE,
755
  OPT_COMPL_ONE_OS,
756
  OPT_COMPL_ONE_EXTSTORAGE,
757
  OPT_COMPL_ONE_IALLOCATOR,
758
  OPT_COMPL_ONE_NETWORK,
759
  OPT_COMPL_INST_ADD_NODES,
760
  OPT_COMPL_ONE_NODEGROUP,
761
  ])
762

    
763

    
764
class CliOption(Option):
765
  """Custom option class for optparse.
766

767
  """
768
  ATTRS = Option.ATTRS + [
769
    "completion_suggest",
770
    ]
771
  TYPES = Option.TYPES + (
772
    "multilistidentkeyval",
773
    "identkeyval",
774
    "keyval",
775
    "unit",
776
    "bool",
777
    "list",
778
    "maybefloat",
779
    )
780
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
781
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
782
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
783
  TYPE_CHECKER["keyval"] = check_key_val
784
  TYPE_CHECKER["unit"] = check_unit
785
  TYPE_CHECKER["bool"] = check_bool
786
  TYPE_CHECKER["list"] = check_list
787
  TYPE_CHECKER["maybefloat"] = check_maybefloat
788

    
789

    
790
# optparse.py sets make_option, so we do it for our own option class, too
791
cli_option = CliOption
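
# Illustrative sketch (editor's note, not part of the original module): options
# built with cli_option plug into a plain optparse parser and the custom types
# above drive the value conversion; example_opt below mirrors the BACKEND_OPT
# definition further down.
#   example_opt = cli_option("-B", "--backend-parameters", dest="beparams",
#                            type="keyval", default={})
#   parser = OptionParser(option_list=[example_opt])
#   opts, _ = parser.parse_args(["-B", "memory=512,auto_balance"])
#   # opts.beparams == {"memory": "512", "auto_balance": True}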
792

    
793

    
794
_YORNO = "yes|no"
795

    
796
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
797
                       help="Increase debugging level")
798

    
799
NOHDR_OPT = cli_option("--no-headers", default=False,
800
                       action="store_true", dest="no_headers",
801
                       help="Don't display column headers")
802

    
803
SEP_OPT = cli_option("--separator", default=None,
804
                     action="store", dest="separator",
805
                     help=("Separator between output fields"
806
                           " (defaults to one space)"))
807

    
808
USEUNITS_OPT = cli_option("--units", default=None,
809
                          dest="units", choices=("h", "m", "g", "t"),
810
                          help="Specify units for output (one of h/m/g/t)")
811

    
812
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
813
                        type="string", metavar="FIELDS",
814
                        help="Comma separated list of output fields")
815

    
816
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
817
                       default=False, help="Force the operation")
818

    
819
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
820
                         default=False, help="Do not require confirmation")
821

    
822
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
823
                                  action="store_true", default=False,
824
                                  help=("Ignore offline nodes and do as much"
825
                                        " as possible"))
826

    
827
TAG_ADD_OPT = cli_option("--tags", dest="tags",
828
                         default=None, help="Comma-separated list of instance"
829
                                            " tags")
830

    
831
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
832
                         default=None, help="File with tag names")
833

    
834
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
835
                        default=False, action="store_true",
836
                        help=("Submit the job and return the job ID, but"
837
                              " don't wait for the job to finish"))
838

    
839
SYNC_OPT = cli_option("--sync", dest="do_locking",
840
                      default=False, action="store_true",
841
                      help=("Grab locks while doing the queries"
842
                            " in order to ensure more consistent results"))
843

    
844
DRY_RUN_OPT = cli_option("--dry-run", default=False,
845
                         action="store_true",
846
                         help=("Do not execute the operation, just run the"
847
                               " check steps and verify if it could be"
848
                               " executed"))
849

    
850
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
851
                         action="store_true",
852
                         help="Increase the verbosity of the operation")
853

    
854
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
855
                              action="store_true", dest="simulate_errors",
856
                              help="Debugging option that makes the operation"
857
                              " treat most runtime checks as failed")
858

    
859
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
860
                        default=True, action="store_false",
861
                        help="Don't wait for sync (DANGEROUS!)")
862

    
863
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
864
                        default=False, action="store_true",
865
                        help="Wait for disks to sync")
866

    
867
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
868
                             action="store_true", default=False,
869
                             help="Enable offline instance")
870

    
871
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
872
                              action="store_true", default=False,
873
                              help="Disable down instance")
874

    
875
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
876
                               help=("Custom disk setup (%s)" %
877
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
878
                               default=None, metavar="TEMPL",
879
                               choices=list(constants.DISK_TEMPLATES))
880

    
881
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
882
                        help="Do not create any network cards for"
883
                        " the instance")
884

    
885
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
886
                               help="Relative path under default cluster-wide"
887
                               " file storage dir to store file-based disks",
888
                               default=None, metavar="<DIR>")
889

    
890
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
891
                                  help="Driver to use for image files",
892
                                  default=None, metavar="<DRIVER>",
893
                                  choices=list(constants.FILE_DRIVER))
894

    
895
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
896
                            help="Select nodes for the instance automatically"
897
                            " using the <NAME> iallocator plugin",
898
                            default=None, type="string",
899
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
900

    
901
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
902
                                    metavar="<NAME>",
903
                                    help="Set the default instance"
904
                                    " allocator plugin",
905
                                    default=None, type="string",
906
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
907

    
908
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
909
                    metavar="<os>",
910
                    completion_suggest=OPT_COMPL_ONE_OS)
911

    
912
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
913
                          type="keyval", default={},
914
                          help="OS parameters")
915

    
916
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
917
                               action="store_true", default=False,
918
                               help="Force an unknown variant")
919

    
920
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
921
                            action="store_true", default=False,
922
                            help="Do not install the OS (will"
923
                            " enable no-start)")
924

    
925
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
926
                                dest="allow_runtime_chgs",
927
                                default=True, action="store_false",
928
                                help="Don't allow runtime changes")
929

    
930
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
931
                         type="keyval", default={},
932
                         help="Backend parameters")
933

    
934
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
935
                        default={}, dest="hvparams",
936
                        help="Hypervisor parameters")
937

    
938
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
939
                             help="Disk template parameters, in the format"
940
                             " template:option=value,option=value,...",
941
                             type="identkeyval", action="append", default=[])
942

    
943
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
944
                                 type="keyval", default={},
945
                                 help="Memory size specs: list of key=value,"
946
                                " where key is one of min, max, std"
947
                                 " (in MB or using a unit)")
948

    
949
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
950
                                 type="keyval", default={},
951
                                 help="CPU count specs: list of key=value,"
952
                                 " where key is one of min, max, std")
953

    
954
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
955
                                  dest="ispecs_disk_count",
956
                                  type="keyval", default={},
957
                                  help="Disk count specs: list of key=value,"
958
                                  " where key is one of min, max, std")
959

    
960
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
961
                                 type="keyval", default={},
962
                                 help="Disk size specs: list of key=value,"
963
                                 " where key is one of min, max, std"
964
                                 " (in MB or using a unit)")
965

    
966
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
967
                                 type="keyval", default={},
968
                                 help="NIC count specs: list of key=value,"
969
                                 " where key is one of min, max, std")
970

    
971
IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
972
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
973
                                      dest="ipolicy_bounds_specs",
974
                                      type="multilistidentkeyval", default=None,
975
                                      help="Complete instance specs limits")
976

    
977
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
978
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
979
                                   dest="ipolicy_std_specs",
980
                                   type="keyval", default=None,
981
                                   help="Complte standard instance specs")
982

    
983
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
984
                                    dest="ipolicy_disk_templates",
985
                                    type="list", default=None,
986
                                    help="Comma-separated list of"
987
                                    " enabled disk templates")
988

    
989
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
990
                                 dest="ipolicy_vcpu_ratio",
991
                                 type="maybefloat", default=None,
992
                                 help="The maximum allowed vcpu-to-cpu ratio")
993

    
994
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
995
                                   dest="ipolicy_spindle_ratio",
996
                                   type="maybefloat", default=None,
997
                                   help=("The maximum allowed instances to"
998
                                         " spindle ratio"))
999

    
1000
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
1001
                            help="Hypervisor and hypervisor options, in the"
1002
                            " format hypervisor:option=value,option=value,...",
1003
                            default=None, type="identkeyval")
1004

    
1005
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
1006
                        help="Hypervisor and hypervisor options, in the"
1007
                        " format hypervisor:option=value,option=value,...",
1008
                        default=[], action="append", type="identkeyval")
1009

    
1010
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
1011
                           action="store_false",
1012
                           help="Don't check that the instance's IP"
1013
                           " is alive")
1014

    
1015
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
1016
                             default=True, action="store_false",
1017
                             help="Don't check that the instance's name"
1018
                             " is resolvable")
1019

    
1020
NET_OPT = cli_option("--net",
1021
                     help="NIC parameters", default=[],
1022
                     dest="nics", action="append", type="identkeyval")
1023

    
1024
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
1025
                      dest="disks", action="append", type="identkeyval")
1026

    
1027
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
1028
                         help="Comma-separated list of disks"
1029
                         " indices to act on (e.g. 0,2) (optional,"
1030
                         " defaults to all disks)")
1031

    
1032
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
1033
                         help="Enforces a single-disk configuration using the"
1034
                         " given disk size, in MiB unless a suffix is used",
1035
                         default=None, type="unit", metavar="<size>")
1036

    
1037
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
1038
                                dest="ignore_consistency",
1039
                                action="store_true", default=False,
1040
                                help="Ignore the consistency of the disks on"
1041
                                " the secondary")
1042

    
1043
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
1044
                                dest="allow_failover",
1045
                                action="store_true", default=False,
1046
                                help="If migration is not possible fallback to"
1047
                                     " failover")
1048

    
1049
NONLIVE_OPT = cli_option("--non-live", dest="live",
1050
                         default=True, action="store_false",
1051
                         help="Do a non-live migration (this usually means"
1052
                         " freeze the instance, save the state, transfer and"
1053
                         " only then resume running on the secondary node)")
1054

    
1055
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
1056
                                default=None,
1057
                                choices=list(constants.HT_MIGRATION_MODES),
1058
                                help="Override default migration mode (choose"
1059
                                " either live or non-live")
1060

    
1061
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
1062
                                help="Target node and optional secondary node",
1063
                                metavar="<pnode>[:<snode>]",
1064
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)
1065

    
1066
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
1067
                           action="append", metavar="<node>",
1068
                           help="Use only this node (can be used multiple"
1069
                           " times, if not given defaults to all nodes)",
1070
                           completion_suggest=OPT_COMPL_ONE_NODE)
1071

    
1072
NODEGROUP_OPT_NAME = "--node-group"
1073
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
1074
                           dest="nodegroup",
1075
                           help="Node group (name or uuid)",
1076
                           metavar="<nodegroup>",
1077
                           default=None, type="string",
1078
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1079

    
1080
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
1081
                             metavar="<node>",
1082
                             completion_suggest=OPT_COMPL_ONE_NODE)
1083

    
1084
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
1085
                         action="store_false",
1086
                         help="Don't start the instance after creation")
1087

    
1088
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
1089
                         action="store_true", default=False,
1090
                         help="Show command instead of executing it")
1091

    
1092
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
1093
                         default=False, action="store_true",
1094
                         help="Instead of performing the migration/failover,"
1095
                         " try to recover from a failed cleanup. This is safe"
1096
                         " to run even if the instance is healthy, but it"
1097
                         " will create extra replication traffic and "
1098
                         " disrupt briefly the replication (like during the"
1099
                         " migration/failover")
1100

    
1101
STATIC_OPT = cli_option("-s", "--static", dest="static",
1102
                        action="store_true", default=False,
1103
                        help="Only show configuration data, not runtime data")
1104

    
1105
ALL_OPT = cli_option("--all", dest="show_all",
1106
                     default=False, action="store_true",
1107
                     help="Show info on all instances on the cluster."
1108
                     " This can take a long time to run, use wisely")
1109

    
1110
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
1111
                           action="store_true", default=False,
1112
                           help="Interactive OS reinstall, lists available"
1113
                           " OS templates for selection")
1114

    
1115
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
1116
                                 action="store_true", default=False,
1117
                                 help="Remove the instance from the cluster"
1118
                                 " configuration even if there are failures"
1119
                                 " during the removal process")
1120

    
1121
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
1122
                                        dest="ignore_remove_failures",
1123
                                        action="store_true", default=False,
1124
                                        help="Remove the instance from the"
1125
                                        " cluster configuration even if there"
1126
                                        " are failures during the removal"
1127
                                        " process")
1128

    
1129
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
1130
                                 action="store_true", default=False,
1131
                                 help="Remove the instance from the cluster")
1132

    
1133
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
1134
                               help="Specifies the new node for the instance",
1135
                               metavar="NODE", default=None,
1136
                               completion_suggest=OPT_COMPL_ONE_NODE)
1137

    
1138
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
1139
                               help="Specifies the new secondary node",
1140
                               metavar="NODE", default=None,
1141
                               completion_suggest=OPT_COMPL_ONE_NODE)
1142

    
1143
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
1144
                             help="Specifies the new primary node",
1145
                             metavar="<node>", default=None,
1146
                             completion_suggest=OPT_COMPL_ONE_NODE)
1147

    
1148
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
1149
                            default=False, action="store_true",
1150
                            help="Replace the disk(s) on the primary"
1151
                                 " node (applies only to internally mirrored"
1152
                                 " disk templates, e.g. %s)" %
1153
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))
1154

    
1155
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
1156
                              default=False, action="store_true",
1157
                              help="Replace the disk(s) on the secondary"
1158
                                   " node (applies only to internally mirrored"
1159
                                   " disk templates, e.g. %s)" %
1160
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1161

    
1162
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
1163
                              default=False, action="store_true",
1164
                              help="Lock all nodes and auto-promote as needed"
1165
                              " to MC status")
1166

    
1167
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
1168
                              default=False, action="store_true",
1169
                              help="Automatically replace faulty disks"
1170
                                   " (applies only to internally mirrored"
1171
                                   " disk templates, e.g. %s)" %
1172
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))
1173

    
1174
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
1175
                             default=False, action="store_true",
1176
                             help="Ignore current recorded size"
1177
                             " (useful for forcing activation when"
1178
                             " the recorded size is wrong)")
1179

    
1180
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
1181
                          metavar="<node>",
1182
                          completion_suggest=OPT_COMPL_ONE_NODE)
1183

    
1184
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
1185
                         metavar="<dir>")
1186

    
1187
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
1188
                              help="Specify the secondary ip for the node",
1189
                              metavar="ADDRESS", default=None)
1190

    
1191
READD_OPT = cli_option("--readd", dest="readd",
1192
                       default=False, action="store_true",
1193
                       help="Readd old node after replacing it")
1194

    
1195
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
1196
                                default=True, action="store_false",
1197
                                help="Disable SSH key fingerprint checking")
1198

    
1199
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
1200
                                 default=False, action="store_true",
1201
                                 help="Force the joining of a node")
1202

    
1203
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
1204
                    type="bool", default=None, metavar=_YORNO,
1205
                    help="Set the master_candidate flag on the node")
1206

    
1207
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
1208
                         type="bool", default=None,
1209
                         help=("Set the offline flag on the node"
1210
                               " (cluster does not communicate with offline"
1211
                               " nodes)"))
1212

    
1213
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
1214
                         type="bool", default=None,
1215
                         help=("Set the drained flag on the node"
1216
                               " (excluded from allocation operations)"))
1217

    
1218
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1219
                              type="bool", default=None, metavar=_YORNO,
1220
                              help="Set the master_capable flag on the node")
1221

    
1222
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1223
                          type="bool", default=None, metavar=_YORNO,
1224
                          help="Set the vm_capable flag on the node")
1225

    
1226
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1227
                             type="bool", default=None, metavar=_YORNO,
1228
                             help="Set the allocatable flag on a volume")
1229

    
1230
NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
1231
                               help="Disable support for lvm based instances"
1232
                               " (cluster-wide)",
1233
                               action="store_false", default=True)
1234

    
1235
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
1236
                            dest="enabled_hypervisors",
1237
                            help="Comma-separated list of hypervisors",
1238
                            type="string", default=None)
1239

    
1240
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
1241
                                        dest="enabled_disk_templates",
1242
                                        help="Comma-separated list of "
1243
                                             "disk templates",
1244
                                        type="string", default=None)
1245

    
1246
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
1247
                            type="keyval", default={},
1248
                            help="NIC parameters")
1249

    
1250
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
1251
                         dest="candidate_pool_size", type="int",
1252
                         help="Set the candidate pool size")
1253

    
1254
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
1255
                         help=("Enables LVM and specifies the volume group"
1256
                               " name (cluster-wide) for disk allocation"
1257
                               " [%s]" % constants.DEFAULT_VG),
1258
                         metavar="VG", default=None)
1259

    
1260
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
1261
                          help="Destroy cluster", action="store_true")
1262

    
1263
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
1264
                          help="Skip node agreement check (dangerous)",
1265
                          action="store_true", default=False)
1266

    
1267
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
1268
                            help="Specify the mac prefix for the instance IP"
1269
                            " addresses, in the format XX:XX:XX",
1270
                            metavar="PREFIX",
1271
                            default=None)
1272

    
1273
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
1274
                               help="Specify the node interface (cluster-wide)"
1275
                               " on which the master IP address will be added"
1276
                               " (cluster init default: %s)" %
1277
                               constants.DEFAULT_BRIDGE,
1278
                               metavar="NETDEV",
1279
                               default=None)
1280

    
1281
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
1282
                                help="Specify the netmask of the master IP",
1283
                                metavar="NETMASK",
1284
                                default=None)
1285

    
1286
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1287
                                     dest="use_external_mip_script",
1288
                                     help="Specify whether to run a"
1289
                                     " user-provided script for the master"
1290
                                     " IP address turnup and"
1291
                                     " turndown operations",
1292
                                     type="bool", metavar=_YORNO, default=None)
1293

    
1294
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1295
                                help="Specify the default directory (cluster-"
1296
                                "wide) for storing the file-based disks [%s]" %
1297
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
1298
                                metavar="DIR",
1299
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)
1300

    
1301
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1302
  "--shared-file-storage-dir",
1303
  dest="shared_file_storage_dir",
1304
  help="Specify the default directory (cluster-wide) for storing the"
1305
  " shared file-based disks [%s]" %
1306
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
1307
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)
1308

    
1309
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1310
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
1311
                                   action="store_false", default=True)
1312

    
1313
MODIFY_ETCHOSTS_OPT = \
1314
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
1315
            default=None, type="bool",
1316
            help="Defines whether the cluster should autonomously modify"
1317
            " and keep in sync the /etc/hosts file of the nodes")
1318

    
1319
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
1320
                                    help="Don't initialize SSH keys",
1321
                                    action="store_false", default=True)
1322

    
1323
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
1324
                             help="Enable parseable error messages",
1325
                             action="store_true", default=False)
1326

    
1327
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
1328
                          help="Skip N+1 memory redundancy tests",
1329
                          action="store_true", default=False)
1330

    
1331
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
1332
                             help="Type of reboot: soft/hard/full",
1333
                             default=constants.INSTANCE_REBOOT_HARD,
1334
                             metavar="<REBOOT>",
1335
                             choices=list(constants.REBOOT_TYPES))
1336

    
1337
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
1338
                                    dest="ignore_secondaries",
1339
                                    default=False, action="store_true",
1340
                                    help="Ignore errors from secondaries")
1341

    
1342
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1343
                            action="store_false", default=True,
1344
                            help="Don't shutdown the instance (unsafe)")
1345

    
1346
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1347
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1348
                         help="Maximum time to wait")
1349

    
1350
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1351
                                  dest="shutdown_timeout", type="int",
1352
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1353
                                  help="Maximum time to wait for instance"
1354
                                  " shutdown")
1355

    
1356
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1357
                          default=None,
1358
                          help=("Number of seconds between repetions of the"
1359
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use Roman numerals for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")
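
# Rough usage sketch: passing e.g. "--priority low" (assuming "low" is among
# the names defined in _PRIONAME_TO_VALUE) makes optparse invoke
# _PriorityOptionCb, which translates the name into its numeric constant
# before storing it, so client code sees the constant rather than the string:
#   options, _ = parser.parse_args(["--priority", "low"])
#   options.priority   # == _PRIONAME_TO_VALUE["low"]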

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")

HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
                                     dest="hotplug_if_possible",
                                     action="store_true", default=False,
                                     help="Hotplug devices in case"
                                          " hotplug is supported")

KEEPDISKS_OPT = cli_option("--keep-disks", dest="keep_disks",
                           action="store_true", default=False,
                           help="Do not remove disks")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]


class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error


class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  """


def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
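
# Illustration of the env_override handling above, with hypothetical values:
# for binary "gnt-instance" and command "list" the variable consulted is
# GNT_INSTANCE_LIST (the underscored binary name joined with the command and
# upper-cased), so something like
#   GNT_INSTANCE_LIST="--no-headers -o name" gnt-instance list
# behaves as if those options had been typed before any explicit arguments,
# because the variable's contents are shlex-split and inserted at position 2
# of argv.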


def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s   %s" % (mlen, "", line)

  yield ""


def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
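
# Sketch of how args_def is interpreted, using the argument classes defined
# earlier in this module (the exact combination below is only illustrative):
# a definition such as
#   [ArgInstance(min=1, max=1), ArgNode(min=0, max=None)]
# accepts exactly one instance name followed by any number of node names,
# while [ArgNode(min=1, max=None)] means "one or more nodes".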


def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)
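
# Example with hypothetical node names:
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)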


def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]
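
# Example with a hypothetical OS definition:
#   CalculateOSNames("debootstrap", ["wheezy", "jessie"])
#     -> ["debootstrap+wheezy", "debootstrap+jessie"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]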


def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")
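
# Example with hypothetical field names:
#   ParseFields(None, ["name", "status"])        -> ["name", "status"]
#   ParseFields("+oper_ram", ["name"])           -> ["name", "oper_ram"]
#   ParseFields("name,os", ["name", "status"])   -> ["name", "os"]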


UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
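
# A minimal usage sketch (the prompt text is made up); the last choice is the
# default and is also what gets returned when no tty is available:
#   if not AskUser("Destroy cluster and all data?\nThis cannot be undone."):
#     return constants.EXIT_FAILURE
# or, with explicit choices, where the last entry is marked as "[a]":
#   choice = AskUser("Continue, skip or abort?",
#                    [("c", "continue", "Continue"),
#                     ("s", "skip", "Skip this step"),
#                     ("a", "abort", "Abort")])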


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """


def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id


def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)


class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()


class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True


def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)


def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identifier
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
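
# A rough sketch of manual submission plus polling, assuming "op" is an
# already-built opcode instance:
#   cl = GetClient()
#   job_id = SendJob([op], cl=cl)
#   results = PollJob(job_id, cl=cl)   # one result per opcode in the job
# Most callers go through SubmitOpCode()/SubmitOrSend() below instead, which
# wrap exactly this sequence.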


def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]


def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
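
# Sketch of the two behaviours, assuming "op" is a built opcode and "opts"
# came from the option parser: with --submit (opts.submit_only set) the call
# raises JobSubmittedException(job_id) so the generic handler just prints the
# job ID; without it, the call blocks and returns the opcode's result:
#   result = SubmitOrSend(op, opts)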


def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail.

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail
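
# The resulting trail is a list of (source, reason, timestamp) tuples; for a
# hypothetical "gnt-instance reboot --reason maintenance" it would look
# roughly like:
#   [(constants.OPCODE_REASON_SRC_USER, "maintenance", <ns timestamp>),
#    ("%s:gnt-instance" % constants.OPCODE_REASON_SRC_CLIENT, "reboot",
#     <ns timestamp>)]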


def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)


def GetClient(query=False):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
  if override_socket:
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
      address = pathutils.MASTER_SOCKET
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
      address = pathutils.QUERY_SOCKET
    else:
      address = override_socket
  elif query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    address = None
  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
  return client
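
# Note on the override hook above: setting the environment variable named by
# constants.LUXI_OVERRIDE either to one of the two recognized keywords or to
# an arbitrary socket path forces the client to connect there, e.g. when
# testing against a non-default socket (hypothetical path):
#   os.environ[constants.LUXI_OVERRIDE] = "/tmp/test-luxi.sock"
#   cl = GetClient()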


def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
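
# Typical usage sketch, mirroring what GenericMain() does below:
#   try:
#     result = func(options, args)
#   except errors.GenericError, err:
#     retcode, msg = FormatError(err)
#     ToStderr(msg)
#     sys.exit(retcode)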


def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
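
# A minimal sketch of a caller, with a made-up command: each value in the
# commands dictionary is a tuple of (function, argument definition, options,
# usage string, description), matching what _ParseArgs() unpacks above:
#   commands = {
#     "ping": (PingNodes, [ArgNode(min=1)], [FORCE_OPT],
#              "<node>...", "Ping the given nodes"),
#     }
#   sys.exit(GenericMain(commands))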


def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics
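
# Example with hypothetical values (assuming "link" and "mode" are valid NIC
# parameters here): the input is a list of (index, key/value dict) pairs as
# produced by the --net option machinery:
#   ParseNicOption([("0", {"link": "br0"}), ("1", {"mode": "routed"})])
#     -> [{"link": "br0"}, {"mode": "routed"}]
# Indices that are never mentioned stay as empty dicts.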


def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0
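
# Sketch of the opts.disks format consumed above (hypothetical values): a
# list of (index, parameters) pairs, where each parameters dict carries
# either a size or an adoption source:
#   opts.disks = [("0", {constants.IDISK_SIZE: "10G"}),
#                 ("1", {constants.IDISK_ADOPT: "data_lv"})]
# "-s 512M" is shorthand for a single disk 0 of that size, and the two forms
# are mutually exclusive.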
2739

    
2740

    
2741
class _RunWhileClusterStoppedHelper:
2742
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2743

2744
  """
2745
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2746
    """Initializes this class.
2747

2748
    @type feedback_fn: callable
2749
    @param feedback_fn: Feedback function
2750
    @type cluster_name: string
2751
    @param cluster_name: Cluster name
2752
    @type master_node: string
2753
    @param master_node Master node name
2754
    @type online_nodes: list
2755
    @param online_nodes: List of names of online nodes
2756

2757
    """
2758
    self.feedback_fn = feedback_fn
2759
    self.cluster_name = cluster_name
2760
    self.master_node = master_node
2761
    self.online_nodes = online_nodes
2762

    
2763
    self.ssh = ssh.SshRunner(self.cluster_name)
2764

    
2765
    self.nonmaster_nodes = [name for name in online_nodes
2766
                            if name != master_node]
2767

    
2768
    assert self.master_node not in self.nonmaster_nodes
2769

    
2770
  def _RunCmd(self, node_name, cmd):
2771
    """Runs a command on the local or a remote machine.
2772

2773
    @type node_name: string
2774
    @param node_name: Machine name
2775
    @type cmd: list
2776
    @param cmd: Command
2777

2778
    """
2779
    if node_name is None or node_name == self.master_node:
2780
      # No need to use SSH
2781
      result = utils.RunCmd(cmd)
2782
    else:
2783
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2784
                            utils.ShellQuoteArgs(cmd))
2785

    
2786
    if result.failed:
2787
      errmsg = ["Failed to run command %s" % result.cmd]
2788
      if node_name:
2789
        errmsg.append("on node %s" % node_name)
2790
      errmsg.append(": exitcode %s and error %s" %
2791
                    (result.exit_code, result.output))
2792
      raise errors.OpExecError(" ".join(errmsg))
2793

    
2794
  def Call(self, fn, *args):
2795
    """Call function while all daemons are stopped.
2796

2797
    @type fn: callable
2798
    @param fn: Function to be called
2799

2800
    """
2801
    # Pause watcher by acquiring an exclusive lock on watcher state file
2802
    self.feedback_fn("Blocking watcher")
2803
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2804
    try:
2805
      # TODO: Currently, this just blocks. There's no timeout.
2806
      # TODO: Should it be a shared lock?
2807
      watcher_block.Exclusive(blocking=True)
2808

    
2809
      # Stop master daemons, so that no new jobs can come in and all running
2810
      # ones are finished
2811
      self.feedback_fn("Stopping master daemons")
2812
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2813
      try:
2814
        # Stop daemons on all nodes
2815
        for node_name in self.online_nodes:
2816
          self.feedback_fn("Stopping daemons on %s" % node_name)
2817
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2818

    
2819
        # All daemons are shut down now
2820
        try:
2821
          return fn(self, *args)
2822
        except Exception, err:
2823
          _, errmsg = FormatError(err)
2824
          logging.exception("Caught exception")
2825
          self.feedback_fn(errmsg)
2826
          raise
2827
      finally:
2828
        # Start cluster again, master node last
2829
        for node_name in self.nonmaster_nodes + [self.master_node]:
2830
          self.feedback_fn("Starting daemons on %s" % node_name)
2831
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2832
    finally:
2833
      # Resume watcher
2834
      watcher_block.Close()
2835

    
2836

    
2837
def RunWhileClusterStopped(feedback_fn, fn, *args):
2838
  """Calls a function while all cluster daemons are stopped.
2839

2840
  @type feedback_fn: callable
2841
  @param feedback_fn: Feedback function
2842
  @type fn: callable
2843
  @param fn: Function to be called when daemons are stopped
2844

2845
  """
2846
  feedback_fn("Gathering cluster information")
2847

    
2848
  # This ensures we're running on the master daemon
2849
  cl = GetClient()
2850

    
2851
  (cluster_name, master_node) = \
2852
    cl.QueryConfigValues(["cluster_name", "master_node"])
2853

    
2854
  online_nodes = GetOnlineNodes([], cl=cl)
2855

    
2856
  # Don't keep a reference to the client. The master daemon will go away.
2857
  del cl
2858

    
2859
  assert master_node in online_nodes
2860

    
2861
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2862
                                       online_nodes).Call(fn, *args)
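

# Illustrative usage sketch for RunWhileClusterStopped; the callback and its
# argument below are made up for demonstration. The callback receives the
# _RunWhileClusterStoppedHelper instance as its first argument and runs while
# the watcher is blocked and all daemons are stopped:
#
#   def _OfflineTask(helper, label):
#     helper.feedback_fn("Running %s while the cluster is down" % label)
#
#   RunWhileClusterStopped(ToStdout, _OfflineTask, "maintenance task")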


def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Formats a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @type separator: string or None
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
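

# Illustrative example for GenerateTable; the field names and row data below
# are made up. With separator=None the "smart" width algorithm is used and
# unit fields are rendered human-readable:
#
#   GenerateTable(headers={"name": "Node", "dtotal": "DTotal"},
#                 fields=["name", "dtotal"],
#                 separator=None,
#                 data=[["node1.example.com", 10240]],
#                 numfields=["dtotal"], unitfields=["dtotal"])
#
# returns a list of already formatted lines, header row first, with the
# numeric column right-aligned.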


def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
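

# Illustrative example of an override dictionary as accepted by
# _GetColumnFormatter and FormatQueryResult; the field names are made up.
# Like _DEFAULT_FORMAT_QUERY, it maps a field name to a pair of
# (formatting function, align right):
#
#   {"ctime": (utils.FormatTime, False),
#    "disk_usage": (lambda value: utils.FormatUnit(value, "h"), True)}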


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
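

# Illustrative call of GenericList; the resource, fields and option values
# shown are made up and would normally come from a client script's command
# line parsing (opts/args):
#
#   GenericList(constants.QR_NODE, ["name", "dtotal"], args, opts.units,
#               opts.separator, not opts.no_headers, verbose=opts.verbose)
#
# prints one line per item and returns an exit code suitable for the CLI.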


def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]
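

# Illustrative example for TableColumn/FormatTable; column titles and row
# values are made up:
#
#   columns = [TableColumn("Name", str, False),
#              TableColumn("Size", lambda v: utils.FormatUnit(v, "h"), True)]
#   FormatTable([["disk0", 10240]], columns, True, None)
#
# yields padded, aligned lines including a header row; passing a separator
# string instead of None returns the rows joined by that separator without
# any padding.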


def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value
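

# Examples following from the suffix table above:
#
#   ParseTimespec(30)      # -> 30 (no suffix, taken as seconds)
#   ParseTimespec("2h")    # -> 7200
#   ParseTimespec("1w")    # -> 604800
#   ParseTimespec("h")     # raises errors.OpPrereqError (only a suffix given)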


def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also print a note on stderr with the names of
  any offline nodes that are being skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
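

# Illustrative call of GetOnlineNodes: the secondary IPs of all online nodes
# except the master, e.g. for copying data over the replication network:
#
#   GetOnlineNodes([], secondary_ips=True, filter_master=True)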


def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
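

# Typical usage sketch for JobExecutor; the opcode, client and options below
# are only an example:
#
#   jex = JobExecutor(cl=cl, opts=opts)
#   for name in instances:
#     jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
#   results = jex.GetResults()
#
# GetResults() submits anything still pending, waits for all jobs and returns
# (success, result) pairs in submission order.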


def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the parameters explicitly set for the object (its "own"
      values, without inherited defaults)
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret
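

# Illustrative example; the parameter names are made up: with
# param_dict={"mem": "128"} and actual={"mem": "128", "cpu": "1"} the result
# is {"mem": "128", "cpu": "default (1)"}.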


def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def _PrintSpecsParameters(buf, specs):
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
  buf.write(",".join(values))


def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


def _MaybeParseUnit(elements):
  """Parses and returns a dict of values that may carry unit suffixes.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, required):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret


def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  ret = None
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
      len(minmax_ispecs[0]) == 1):
    for (key, spec) in minmax_ispecs[0].items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
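

# Illustrative example for PrintGenericInfo; names and addresses are made up.
# The accepted structure can mix ordered key/value pairs, lists and plain
# strings:
#
#   PrintGenericInfo([
#     ("name", "instance1.example.com"),
#     ("nics", [
#       [("ip", "192.0.2.10"), ("mac", "aa:00:00:00:00:01")],
#       ]),
#     ])
#
# which prints indented, YAML-compatible "key: value" lines.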