#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRINT_JOBID_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "SUBMIT_OPTS",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
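
# Example (illustrative only): a command taking exactly one instance name
# followed by any number of node names could describe its positional
# arguments as
#
#   [ArgInstance(min=1, max=1), ArgNode()]
#
# the ARGS_* constants above cover the most common combinations.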


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
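
# Example (illustrative values): with opts.tag_type == constants.TAG_NODE and
# args == ["node1.example.com", "mytag"], _ExtractTagsObject returns
# (constants.TAG_NODE, "node1.example.com") and leaves args == ["mytag"].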


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)
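
# Example (illustrative): if the tags_source file contains the lines "web" and
# "frontend", _ExtendTags appends "web" and "frontend" to the tag names
# already given on the command line; a file name of "-" reads them from stdin.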


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParser's custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
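
# Example: an option of type "unit" goes through utils.ParseUnit which,
# following the usual Ganeti convention, normalizes sizes to mebibytes, so an
# argument such as "4G" would come back as 4096.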


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
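
# Example (illustrative values): with parse_prefixes=True, the string
# "vcpus=4,no_acpi,-kernel_path" parses to
# {"vcpus": "4", "acpi": False, "kernel_path": None}.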


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
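
# Example (illustrative values): with parse_prefixes=True,
# "xen-pvm:kernel_path=/vmlinuz,root_path=/dev/xvda1" parses to
# ("xen-pvm", {"kernel_path": "/vmlinuz", "root_path": "/dev/xvda1"}),
# while "no_xen-pvm" parses to ("xen-pvm", False).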


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append are possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
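
# Example (illustrative values): for an option of type "multilistidentkeyval",
# the argument "0:size=512,mode=rw/1:size=1024//0:size=2048" parses to
# [{"0": {"size": "512", "mode": "rw"}, "1": {"size": "1024"}},
#  {"0": {"size": "2048"}}].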


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might also be defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
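
# Example: for an option of type "maybefloat", "2.5" is returned as the float
# 2.5, while the literal constants.VALUE_DEFAULT string is passed through
# unchanged.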


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
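
# The custom types registered above are used through cli_option, e.g. a
# (purely illustrative) declaration such as
#
#   _EXAMPLE_PARAMS_OPT = cli_option("--example-params", dest="example_params",
#                                    type="keyval", default={},
#                                    help="Example key=value parameters")
#
# would parse "--example-params a=1,no_b" into {"a": "1", "b": False} via the
# "keyval" type checker.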


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                  action="store_true", default=False,
                                  help=("Ignore offline nodes and do as much"
                                        " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default="loop", metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                 type="keyval", default={},
                                 help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                 dest="ipolicy_vcpu_ratio",
                                 type="maybefloat", default=None,
                                 help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disk"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible, fall back"
                                     " to failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration, try to"
                         " recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " disrupt briefly the replication (like during the"
                         " migration)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                               help="Specifies the new node for the instance",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))
1412

    
1413
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1414
                                           dest="new_cluster_domain_secret",
1415
                                           default=False, action="store_true",
1416
                                           help=("Create a new cluster domain"
1417
                                                 " secret"))
1418

    
1419
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1420
                              dest="use_replication_network",
1421
                              help="Whether to use the replication network"
1422
                              " for talking to the nodes",
1423
                              action="store_true", default=False)
1424

    
1425
MAINTAIN_NODE_HEALTH_OPT = \
1426
    cli_option("--maintain-node-health", dest="maintain_node_health",
1427
               metavar=_YORNO, default=None, type="bool",
1428
               help="Configure the cluster to automatically maintain node"
1429
               " health, by shutting down unknown instances, shutting down"
1430
               " unknown DRBD devices, etc.")
1431

    
1432
IDENTIFY_DEFAULTS_OPT = \
1433
    cli_option("--identify-defaults", dest="identify_defaults",
1434
               default=False, action="store_true",
1435
               help="Identify which saved instance parameters are equal to"
1436
               " the current cluster defaults and set them as such, instead"
1437
               " of marking them as overridden")
1438

    
1439
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1440
                         action="store", dest="uid_pool",
1441
                         help=("A list of user-ids or user-id"
1442
                               " ranges separated by commas"))
1443

    
1444
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1445
                          action="store", dest="add_uids",
1446
                          help=("A list of user-ids or user-id"
1447
                                " ranges separated by commas, to be"
1448
                                " added to the user-id pool"))
1449

    
1450
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1451
                             action="store", dest="remove_uids",
1452
                             help=("A list of user-ids or user-id"
1453
                                   " ranges separated by commas, to be"
1454
                                   " removed from the user-id pool"))
1455

    
1456
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1457
                              action="store", dest="reserved_lvs",
1458
                              help=("A comma-separated list of reserved"
1459
                                    " logical volumes names, that will be"
1460
                                    " ignored by cluster verify"))
1461

    
1462
ROMAN_OPT = cli_option("--roman",
1463
                       dest="roman_integers", default=False,
1464
                       action="store_true",
1465
                       help="Use roman numbers for positive integers")
1466

    
1467
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1468
                             action="store", default=None,
1469
                             help="Specifies usermode helper for DRBD")
1470

    
1471
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1472
                                action="store_false", default=True,
1473
                                help="Disable support for DRBD")
1474

    
1475
PRIMARY_IP_VERSION_OPT = \
1476
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1477
               action="store", dest="primary_ip_version",
1478
               metavar="%d|%d" % (constants.IP4_VERSION,
1479
                                  constants.IP6_VERSION),
1480
               help="Cluster-wide IP version for primary IP")
1481

    
1482
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
1483
                              action="store_true",
1484
                              help="Show machine name for every line in output")
1485

    
1486
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
1487
                              action="store_true",
1488
                              help=("Hide successful results and show failures"
1489
                                    " only (determined by the exit code)"))
1490

    
1491
REASON_OPT = cli_option("--reason", default=None,
1492
                        help="The reason for executing the command")
1493

    
1494

    
1495
def _PriorityOptionCb(option, _, value, parser):
1496
  """Callback for processing C{--priority} option.
1497

1498
  """
1499
  value = _PRIONAME_TO_VALUE[value]
1500

    
1501
  setattr(parser.values, option.dest, value)
1502

    
1503

    
1504
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1505
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1506
                          choices=_PRIONAME_TO_VALUE.keys(),
1507
                          action="callback", type="choice",
1508
                          callback=_PriorityOptionCb,
1509
                          help="Priority for opcode processing")
1510

    
1511
HID_OS_OPT = cli_option("--hidden", dest="hidden",
1512
                        type="bool", default=None, metavar=_YORNO,
1513
                        help="Sets the hidden flag on the OS")
1514

    
1515
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1516
                        type="bool", default=None, metavar=_YORNO,
1517
                        help="Sets the blacklisted flag on the OS")
1518

    
1519
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1520
                                     type="bool", metavar=_YORNO,
1521
                                     dest="prealloc_wipe_disks",
1522
                                     help=("Wipe disks prior to instance"
1523
                                           " creation"))
1524

    
1525
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1526
                             type="keyval", default=None,
1527
                             help="Node parameters")
1528

    
1529
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1530
                              action="store", metavar="POLICY", default=None,
1531
                              help="Allocation policy for the node group")
1532

    
1533
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1534
                              type="bool", metavar=_YORNO,
1535
                              dest="node_powered",
1536
                              help="Specify if the SoR for node is powered")
1537

    
1538
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1539
                             default=constants.OOB_TIMEOUT,
1540
                             help="Maximum time to wait for out-of-band helper")
1541

    
1542
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1543
                             default=constants.OOB_POWER_DELAY,
1544
                             help="Time in seconds to wait between power-ons")
1545

    
1546
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
1547
                              action="store_true", default=False,
1548
                              help=("Whether command argument should be treated"
1549
                                    " as filter"))
1550

    
1551
NO_REMEMBER_OPT = cli_option("--no-remember",
1552
                             dest="no_remember",
1553
                             action="store_true", default=False,
1554
                             help="Perform but do not record the change"
1555
                             " in the configuration")
1556

    
1557
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
1558
                              default=False, action="store_true",
1559
                              help="Evacuate primary instances only")
1560

    
1561
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
1562
                                default=False, action="store_true",
1563
                                help="Evacuate secondary instances only"
1564
                                     " (applies only to internally mirrored"
1565
                                     " disk templates, e.g. %s)" %
1566
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))
1567

    
1568
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
1569
                                action="store_true", default=False,
1570
                                help="Pause instance at startup")
1571

    
1572
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
1573
                          help="Destination node group (name or uuid)",
1574
                          default=None, action="append",
1575
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)
1576

    
1577
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
1578
                               action="append", dest="ignore_errors",
1579
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
1580
                               help="Error code to be ignored")
1581

    
1582
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
1583
                            action="append",
1584
                            help=("Specify disk state information in the"
1585
                                  " format"
1586
                                  " storage_type/identifier:option=value,...;"
1587
                                  " note this is unused for now"),
1588
                            type="identkeyval")
1589

    
1590
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
1591
                          action="append",
1592
                          help=("Specify hypervisor state information in the"
1593
                                " format hypervisor:option=value,...;"
1594
                                " note this is unused for now"),
1595
                          type="identkeyval")
1596

    
1597
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
1598
                                action="store_true", default=False,
1599
                                help="Ignore instance policy violations")
1600

    
1601
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
1602
                             help="Sets the instance's runtime memory,"
1603
                             " ballooning it up or down to the new value",
1604
                             default=None, type="unit", metavar="<size>")
1605

    
1606
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
1607
                          action="store_true", default=False,
1608
                          help="Marks the grow as absolute instead of the"
1609
                          " (default) relative mode")
1610

    
1611
NETWORK_OPT = cli_option("--network",
1612
                         action="store", default=None, dest="network",
1613
                         help="IP network in CIDR notation")
1614

    
1615
GATEWAY_OPT = cli_option("--gateway",
1616
                         action="store", default=None, dest="gateway",
1617
                         help="IP address of the router (gateway)")
1618

    
1619
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
1620
                                  action="store", default=None,
1621
                                  dest="add_reserved_ips",
1622
                                  help="Comma-separated list of"
1623
                                  " reserved IPs to add")
1624

    
1625
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
1626
                                     action="store", default=None,
1627
                                     dest="remove_reserved_ips",
1628
                                     help="Comma-delimited list of"
1629
                                     " reserved IPs to remove")
1630

    
1631
NETWORK6_OPT = cli_option("--network6",
1632
                          action="store", default=None, dest="network6",
1633
                          help="IP network in CIDR notation")
1634

    
1635
GATEWAY6_OPT = cli_option("--gateway6",
1636
                          action="store", default=None, dest="gateway6",
1637
                          help="IP6 address of the router (gateway)")
1638

    
1639
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
1640
                                  dest="conflicts_check",
1641
                                  default=True,
1642
                                  action="store_false",
1643
                                  help="Don't check for conflicting IPs")
1644

    
1645
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
1646
                                 default=False, action="store_true",
1647
                                 help="Include default values")
1648

    
1649
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
1650
                         action="store_true", default=False,
1651
                         help="Try to hotplug device")
1652

    
1653
#: Options provided by all commands
1654
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
1655

    
1656
# options related to asynchronous job handling
1657

    
1658
SUBMIT_OPTS = [
1659
  SUBMIT_OPT,
1660
  PRINT_JOBID_OPT,
1661
  ]
1662

    
1663
# common options for creating instances. add and import then add their own
1664
# specific ones.
1665
COMMON_CREATE_OPTS = [
1666
  BACKEND_OPT,
1667
  DISK_OPT,
1668
  DISK_TEMPLATE_OPT,
1669
  FILESTORE_DIR_OPT,
1670
  FILESTORE_DRIVER_OPT,
1671
  HYPERVISOR_OPT,
1672
  IALLOCATOR_OPT,
1673
  NET_OPT,
1674
  NODE_PLACEMENT_OPT,
1675
  NOIPCHECK_OPT,
1676
  NOCONFLICTSCHECK_OPT,
1677
  NONAMECHECK_OPT,
1678
  NONICS_OPT,
1679
  NWSYNC_OPT,
1680
  OSPARAMS_OPT,
1681
  OS_SIZE_OPT,
1682
  SUBMIT_OPT,
1683
  PRINT_JOBID_OPT,
1684
  TAG_ADD_OPT,
1685
  DRY_RUN_OPT,
1686
  PRIORITY_OPT,
1687
  ]
1688

    
1689
# common instance policy options
1690
INSTANCE_POLICY_OPTS = [
1691
  IPOLICY_BOUNDS_SPECS_OPT,
1692
  IPOLICY_DISK_TEMPLATES,
1693
  IPOLICY_VCPU_RATIO,
1694
  IPOLICY_SPINDLE_RATIO,
1695
  ]
1696

    
1697
# instance policy split specs options
1698
SPLIT_ISPECS_OPTS = [
1699
  SPECS_CPU_COUNT_OPT,
1700
  SPECS_DISK_COUNT_OPT,
1701
  SPECS_DISK_SIZE_OPT,
1702
  SPECS_MEM_SIZE_OPT,
1703
  SPECS_NIC_COUNT_OPT,
1704
  ]
1705

    
1706

    
1707
class _ShowUsage(Exception):
1708
  """Exception class for L{_ParseArgs}.
1709

1710
  """
1711
  def __init__(self, exit_error):
1712
    """Initializes instances of this class.
1713

1714
    @type exit_error: bool
1715
    @param exit_error: Whether to report failure on exit
1716

1717
    """
1718
    Exception.__init__(self)
1719
    self.exit_error = exit_error
1720

    
1721

    
1722
class _ShowVersion(Exception):
1723
  """Exception class for L{_ParseArgs}.
1724

1725
  """
1726

    
1727

    
1728
def _ParseArgs(binary, argv, commands, aliases, env_override):
1729
  """Parser for the command line arguments.
1730

1731
  This function parses the arguments and returns the function which
1732
  must be executed together with its (modified) arguments.
1733

1734
  @param binary: Script name
1735
  @param argv: Command line arguments
1736
  @param commands: Dictionary containing command definitions
1737
  @param aliases: dictionary with command aliases {"alias": "target", ...}
1738
  @param env_override: list of env variables allowed for default args
1739
  @raise _ShowUsage: If usage description should be shown
1740
  @raise _ShowVersion: If version should be shown
1741

1742
  """
1743
  assert not (env_override - set(commands))
1744
  assert not (set(aliases.keys()) & set(commands.keys()))
1745

    
1746
  if len(argv) > 1:
1747
    cmd = argv[1]
1748
  else:
1749
    # No option or command given
1750
    raise _ShowUsage(exit_error=True)
1751

    
1752
  if cmd == "--version":
1753
    raise _ShowVersion()
1754
  elif cmd == "--help":
1755
    raise _ShowUsage(exit_error=False)
1756
  elif not (cmd in commands or cmd in aliases):
1757
    raise _ShowUsage(exit_error=True)
1758

    
1759
  # get command, unalias it, and look it up in commands
1760
  if cmd in aliases:
1761
    if aliases[cmd] not in commands:
1762
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1763
                                   " command '%s'" % (cmd, aliases[cmd]))
1764

    
1765
    cmd = aliases[cmd]
1766

    
1767
  if cmd in env_override:
1768
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
1769
    env_args = os.environ.get(args_env_name)
1770
    if env_args:
1771
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
1772

    
1773
  func, args_def, parser_opts, usage, description = commands[cmd]
1774
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1775
                        description=description,
1776
                        formatter=TitledHelpFormatter(),
1777
                        usage="%%prog %s %s" % (cmd, usage))
1778
  parser.disable_interspersed_args()
1779
  options, args = parser.parse_args(args=argv[2:])
1780

    
1781
  if not _CheckArguments(cmd, args_def, args):
1782
    return None, None, None
1783

    
1784
  return func, options, args
1785

    
1786

    
1787
def _FormatUsage(binary, commands):
1788
  """Generates a nice description of all commands.
1789

1790
  @param binary: Script name
1791
  @param commands: Dictionary containing command definitions
1792

1793
  """
1794
  # compute the max line length for cmd + usage
1795
  mlen = min(60, max(map(len, commands)))
1796

    
1797
  yield "Usage: %s {command} [options...] [argument...]" % binary
1798
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
1799
  yield ""
1800
  yield "Commands:"
1801

    
1802
  # and format a nice command list
1803
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
1804
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1805
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
1806
    for line in help_lines:
1807
      yield " %-*s   %s" % (mlen, "", line)
1808

    
1809
  yield ""
1810

    
1811

    
1812
def _CheckArguments(cmd, args_def, args):
1813
  """Verifies the arguments using the argument definition.
1814

1815
  Algorithm:
1816

1817
    1. Abort with error if values specified by user but none expected.
1818

1819
    1. For each argument in definition
1820

1821
      1. Keep running count of minimum number of values (min_count)
1822
      1. Keep running count of maximum number of values (max_count)
1823
      1. If it has an unlimited number of values
1824

1825
        1. Abort with error if it's not the last argument in the definition
1826

1827
    1. If last argument has limited number of values
1828

1829
      1. Abort with error if number of values doesn't match or is too large
1830

1831
    1. Abort with error if user didn't pass enough values (min_count)
1832

1833
  """
1834
  if args and not args_def:
1835
    ToStderr("Error: Command %s expects no arguments", cmd)
1836
    return False
1837

    
1838
  min_count = None
1839
  max_count = None
1840
  check_max = None
1841

    
1842
  last_idx = len(args_def) - 1
1843

    
1844
  for idx, arg in enumerate(args_def):
1845
    if min_count is None:
1846
      min_count = arg.min
1847
    elif arg.min is not None:
1848
      min_count += arg.min
1849

    
1850
    if max_count is None:
1851
      max_count = arg.max
1852
    elif arg.max is not None:
1853
      max_count += arg.max
1854

    
1855
    if idx == last_idx:
1856
      check_max = (arg.max is not None)
1857

    
1858
    elif arg.max is None:
1859
      raise errors.ProgrammerError("Only the last argument can have max=None")
1860

    
1861
  if check_max:
1862
    # Command with exact number of arguments
1863
    if (min_count is not None and max_count is not None and
1864
        min_count == max_count and len(args) != min_count):
1865
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1866
      return False
1867

    
1868
    # Command with limited number of arguments
1869
    if max_count is not None and len(args) > max_count:
1870
      ToStderr("Error: Command %s expects only %d argument(s)",
1871
               cmd, max_count)
1872
      return False
1873

    
1874
  # Command with some required arguments
1875
  if min_count is not None and len(args) < min_count:
1876
    ToStderr("Error: Command %s expects at least %d argument(s)",
1877
             cmd, min_count)
1878
    return False
1879

    
1880
  return True
1881

    
1882

    
1883
def SplitNodeOption(value):
1884
  """Splits the value of a --node option.
1885

1886
  """
1887
  if value and ":" in value:
1888
    return value.split(":", 1)
1889
  else:
1890
    return (value, None)
1891

    
1892

    
1893
def CalculateOSNames(os_name, os_variants):
1894
  """Calculates all the names an OS can be called, according to its variants.
1895

1896
  @type os_name: string
1897
  @param os_name: base name of the os
1898
  @type os_variants: list or None
1899
  @param os_variants: list of supported variants
1900
  @rtype: list
1901
  @return: list of valid names
1902

1903
  """
1904
  if os_variants:
1905
    return ["%s+%s" % (os_name, v) for v in os_variants]
1906
  else:
1907
    return [os_name]
1908

    
1909

    
1910
def ParseFields(selected, default):
1911
  """Parses the values of "--field"-like options.
1912

1913
  @type selected: string or None
1914
  @param selected: User-selected options
1915
  @type default: list
1916
  @param default: Default fields
1917

1918
  """
1919
  if selected is None:
1920
    return default
1921

    
1922
  if selected.startswith("+"):
1923
    return default + selected[1:].split(",")
1924

    
1925
  return selected.split(",")
1926

    
1927

    
1928
UsesRPC = rpc.RunWithRPC
1929

    
1930

    
1931
def AskUser(text, choices=None):
1932
  """Ask the user a question.
1933

1934
  @param text: the question to ask
1935

1936
  @param choices: list with elements tuples (input_char, return_value,
1937
      description); if not given, it will default to: [('y', True,
1938
      'Perform the operation'), ('n', False, 'Do no do the operation')];
1939
      note that the '?' char is reserved for help
1940

1941
  @return: one of the return values from the choices list; if input is
1942
      not possible (i.e. not running with a tty, we return the last
1943
      entry from the list
1944

1945
  """
1946
  if choices is None:
1947
    choices = [("y", True, "Perform the operation"),
1948
               ("n", False, "Do not perform the operation")]
1949
  if not choices or not isinstance(choices, list):
1950
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
1951
  for entry in choices:
1952
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
1953
      raise errors.ProgrammerError("Invalid choices element to AskUser")
1954

    
1955
  answer = choices[-1][1]
1956
  new_text = []
1957
  for line in text.splitlines():
1958
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1959
  text = "\n".join(new_text)
1960
  try:
1961
    f = file("/dev/tty", "a+")
1962
  except IOError:
1963
    return answer
1964
  try:
1965
    chars = [entry[0] for entry in choices]
1966
    chars[-1] = "[%s]" % chars[-1]
1967
    chars.append("?")
1968
    maps = dict([(entry[0], entry[1]) for entry in choices])
1969
    while True:
1970
      f.write(text)
1971
      f.write("\n")
1972
      f.write("/".join(chars))
1973
      f.write(": ")
1974
      line = f.readline(2).strip().lower()
1975
      if line in maps:
1976
        answer = maps[line]
1977
        break
1978
      elif line == "?":
1979
        for entry in choices:
1980
          f.write(" %s - %s\n" % (entry[0], entry[2]))
1981
        f.write("\n")
1982
        continue
1983
  finally:
1984
    f.close()
1985
  return answer
1986

    
1987

    
1988
class JobSubmittedException(Exception):
1989
  """Job was submitted, client should exit.
1990

1991
  This exception has one argument, the ID of the job that was
1992
  submitted. The handler should print this ID.
1993

1994
  This is not an error, just a structured way to exit from clients.
1995

1996
  """
1997

    
1998

    
1999
def SendJob(ops, cl=None):
2000
  """Function to submit an opcode without waiting for the results.
2001

2002
  @type ops: list
2003
  @param ops: list of opcodes
2004
  @type cl: luxi.Client
2005
  @param cl: the luxi client to use for communicating with the master;
2006
             if None, a new client will be created
2007

2008
  """
2009
  if cl is None:
2010
    cl = GetClient()
2011

    
2012
  job_id = cl.SubmitJob(ops)
2013

    
2014
  return job_id
2015

    
2016

    
2017
def GenericPollJob(job_id, cbs, report_cbs):
2018
  """Generic job-polling function.
2019

2020
  @type job_id: number
2021
  @param job_id: Job ID
2022
  @type cbs: Instance of L{JobPollCbBase}
2023
  @param cbs: Data callbacks
2024
  @type report_cbs: Instance of L{JobPollReportCbBase}
2025
  @param report_cbs: Reporting callbacks
2026

2027
  """
2028
  prev_job_info = None
2029
  prev_logmsg_serial = None
2030

    
2031
  status = None
2032

    
2033
  while True:
2034
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
2035
                                      prev_logmsg_serial)
2036
    if not result:
2037
      # job not found, go away!
2038
      raise errors.JobLost("Job with id %s lost" % job_id)
2039

    
2040
    if result == constants.JOB_NOTCHANGED:
2041
      report_cbs.ReportNotChanged(job_id, status)
2042

    
2043
      # Wait again
2044
      continue
2045

    
2046
    # Split result, a tuple of (field values, log entries)
2047
    (job_info, log_entries) = result
2048
    (status, ) = job_info
2049

    
2050
    if log_entries:
2051
      for log_entry in log_entries:
2052
        (serial, timestamp, log_type, message) = log_entry
2053
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
2054
                                    log_type, message)
2055
        prev_logmsg_serial = max(prev_logmsg_serial, serial)
2056

    
2057
    # TODO: Handle canceled and archived jobs
2058
    elif status in (constants.JOB_STATUS_SUCCESS,
2059
                    constants.JOB_STATUS_ERROR,
2060
                    constants.JOB_STATUS_CANCELING,
2061
                    constants.JOB_STATUS_CANCELED):
2062
      break
2063

    
2064
    prev_job_info = job_info
2065

    
2066
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
2067
  if not jobs:
2068
    raise errors.JobLost("Job with id %s lost" % job_id)
2069

    
2070
  status, opstatus, result = jobs[0]
2071

    
2072
  if status == constants.JOB_STATUS_SUCCESS:
2073
    return result
2074

    
2075
  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
2076
    raise errors.OpExecError("Job was canceled")
2077

    
2078
  has_ok = False
2079
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
2080
    if status == constants.OP_STATUS_SUCCESS:
2081
      has_ok = True
2082
    elif status == constants.OP_STATUS_ERROR:
2083
      errors.MaybeRaise(msg)
2084

    
2085
      if has_ok:
2086
        raise errors.OpExecError("partial failure (opcode %d): %s" %
2087
                                 (idx, msg))
2088

    
2089
      raise errors.OpExecError(str(msg))
2090

    
2091
  # default failure mode
2092
  raise errors.OpExecError(result)
2093

    
2094

    
2095
class JobPollCbBase:
2096
  """Base class for L{GenericPollJob} callbacks.
2097

2098
  """
2099
  def __init__(self):
2100
    """Initializes this class.
2101

2102
    """
2103

    
2104
  def WaitForJobChangeOnce(self, job_id, fields,
2105
                           prev_job_info, prev_log_serial):
2106
    """Waits for changes on a job.
2107

2108
    """
2109
    raise NotImplementedError()
2110

    
2111
  def QueryJobs(self, job_ids, fields):
2112
    """Returns the selected fields for the selected job IDs.
2113

2114
    @type job_ids: list of numbers
2115
    @param job_ids: Job IDs
2116
    @type fields: list of strings
2117
    @param fields: Fields
2118

2119
    """
2120
    raise NotImplementedError()
2121

    
2122

    
2123
class JobPollReportCbBase:
2124
  """Base class for L{GenericPollJob} reporting callbacks.
2125

2126
  """
2127
  def __init__(self):
2128
    """Initializes this class.
2129

2130
    """
2131

    
2132
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2133
    """Handles a log message.
2134

2135
    """
2136
    raise NotImplementedError()
2137

    
2138
  def ReportNotChanged(self, job_id, status):
2139
    """Called for if a job hasn't changed in a while.
2140

2141
    @type job_id: number
2142
    @param job_id: Job ID
2143
    @type status: string or None
2144
    @param status: Job status if available
2145

2146
    """
2147
    raise NotImplementedError()
2148

    
2149

    
2150
class _LuxiJobPollCb(JobPollCbBase):
2151
  def __init__(self, cl):
2152
    """Initializes this class.
2153

2154
    """
2155
    JobPollCbBase.__init__(self)
2156
    self.cl = cl
2157

    
2158
  def WaitForJobChangeOnce(self, job_id, fields,
2159
                           prev_job_info, prev_log_serial):
2160
    """Waits for changes on a job.
2161

2162
    """
2163
    return self.cl.WaitForJobChangeOnce(job_id, fields,
2164
                                        prev_job_info, prev_log_serial)
2165

    
2166
  def QueryJobs(self, job_ids, fields):
2167
    """Returns the selected fields for the selected job IDs.
2168

2169
    """
2170
    return self.cl.QueryJobs(job_ids, fields)
2171

    
2172

    
2173
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2174
  def __init__(self, feedback_fn):
2175
    """Initializes this class.
2176

2177
    """
2178
    JobPollReportCbBase.__init__(self)
2179

    
2180
    self.feedback_fn = feedback_fn
2181

    
2182
    assert callable(feedback_fn)
2183

    
2184
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2185
    """Handles a log message.
2186

2187
    """
2188
    self.feedback_fn((timestamp, log_type, log_msg))
2189

    
2190
  def ReportNotChanged(self, job_id, status):
2191
    """Called if a job hasn't changed in a while.
2192

2193
    """
2194
    # Ignore
2195

    
2196

    
2197
class StdioJobPollReportCb(JobPollReportCbBase):
2198
  def __init__(self):
2199
    """Initializes this class.
2200

2201
    """
2202
    JobPollReportCbBase.__init__(self)
2203

    
2204
    self.notified_queued = False
2205
    self.notified_waitlock = False
2206

    
2207
  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2208
    """Handles a log message.
2209

2210
    """
2211
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
2212
             FormatLogMessage(log_type, log_msg))
2213

    
2214
  def ReportNotChanged(self, job_id, status):
2215
    """Called if a job hasn't changed in a while.
2216

2217
    """
2218
    if status is None:
2219
      return
2220

    
2221
    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
2222
      ToStderr("Job %s is waiting in queue", job_id)
2223
      self.notified_queued = True
2224

    
2225
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
2226
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
2227
      self.notified_waitlock = True
2228

    
2229

    
2230
def FormatLogMessage(log_type, log_msg):
2231
  """Formats a job message according to its type.
2232

2233
  """
2234
  if log_type != constants.ELOG_MESSAGE:
2235
    log_msg = str(log_msg)
2236

    
2237
  return utils.SafeEncode(log_msg)
2238

    
2239

    
2240
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2241
  """Function to poll for the result of a job.
2242

2243
  @type job_id: job identified
2244
  @param job_id: the job to poll for results
2245
  @type cl: luxi.Client
2246
  @param cl: the luxi client to use for communicating with the master;
2247
             if None, a new client will be created
2248

2249
  """
2250
  if cl is None:
2251
    cl = GetClient()
2252

    
2253
  if reporter is None:
2254
    if feedback_fn:
2255
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
2256
    else:
2257
      reporter = StdioJobPollReportCb()
2258
  elif feedback_fn:
2259
    raise errors.ProgrammerError("Can't specify reporter and feedback function")
2260

    
2261
  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2262

    
2263

    
2264
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2265
  """Legacy function to submit an opcode.
2266

2267
  This is just a simple wrapper over the construction of the processor
2268
  instance. It should be extended to better handle feedback and
2269
  interaction functions.
2270

2271
  """
2272
  if cl is None:
2273
    cl = GetClient()
2274

    
2275
  SetGenericOpcodeOpts([op], opts)
2276

    
2277
  job_id = SendJob([op], cl=cl)
2278
  if hasattr(opts, "print_jobid") and opts.print_jobid:
2279
    ToStdout("%d" % job_id)
2280

    
2281
  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
2282
                       reporter=reporter)
2283

    
2284
  return op_results[0]
2285

    
2286

    
2287
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2288
  """Wrapper around SubmitOpCode or SendJob.
2289

2290
  This function will decide, based on the 'opts' parameter, whether to
2291
  submit and wait for the result of the opcode (and return it), or
2292
  whether to just send the job and print its identifier. It is used in
2293
  order to simplify the implementation of the '--submit' option.
2294

2295
  It will also process the opcodes if we're sending the via SendJob
2296
  (otherwise SubmitOpCode does it).
2297

2298
  """
2299
  if opts and opts.submit_only:
2300
    job = [op]
2301
    SetGenericOpcodeOpts(job, opts)
2302
    job_id = SendJob(job, cl=cl)
2303
    if opts.print_jobid:
2304
      ToStdout("%d" % job_id)
2305
    raise JobSubmittedException(job_id)
2306
  else:
2307
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2308

    
2309

    
2310
def _InitReasonTrail(op, opts):
2311
  """Builds the first part of the reason trail
2312

2313
  Builds the initial part of the reason trail, adding the user provided reason
2314
  (if it exists) and the name of the command starting the operation.
2315

2316
  @param op: the opcode the reason trail will be added to
2317
  @param opts: the command line options selected by the user
2318

2319
  """
2320
  assert len(sys.argv) >= 2
2321
  trail = []
2322

    
2323
  if opts.reason:
2324
    trail.append((constants.OPCODE_REASON_SRC_USER,
2325
                  opts.reason,
2326
                  utils.EpochNano()))
2327

    
2328
  binary = os.path.basename(sys.argv[0])
2329
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
2330
  command = sys.argv[1]
2331
  trail.append((source, command, utils.EpochNano()))
2332
  op.reason = trail
2333

    
2334

    
2335
def SetGenericOpcodeOpts(opcode_list, options):
2336
  """Processor for generic options.
2337

2338
  This function updates the given opcodes based on generic command
2339
  line options (like debug, dry-run, etc.).
2340

2341
  @param opcode_list: list of opcodes
2342
  @param options: command line options or None
2343
  @return: None (in-place modification)
2344

2345
  """
2346
  if not options:
2347
    return
2348
  for op in opcode_list:
2349
    op.debug_level = options.debug
2350
    if hasattr(options, "dry_run"):
2351
      op.dry_run = options.dry_run
2352
    if getattr(options, "priority", None) is not None:
2353
      op.priority = options.priority
2354
    _InitReasonTrail(op, options)
2355

    
2356

    
2357
def GetClient(query=False):
2358
  """Connects to the a luxi socket and returns a client.
2359

2360
  @type query: boolean
2361
  @param query: this signifies that the client will only be
2362
      used for queries; if the build-time parameter
2363
      enable-split-queries is enabled, then the client will be
2364
      connected to the query socket instead of the masterd socket
2365

2366
  """
2367
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
2368
  if override_socket:
2369
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
2370
      address = pathutils.MASTER_SOCKET
2371
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
2372
      address = pathutils.QUERY_SOCKET
2373
    else:
2374
      address = override_socket
2375
  elif query and constants.ENABLE_SPLIT_QUERY:
2376
    address = pathutils.QUERY_SOCKET
2377
  else:
2378
    address = None
2379
  # TODO: Cache object?
2380
  try:
2381
    client = luxi.Client(address=address)
2382
  except luxi.NoMasterError:
2383
    ss = ssconf.SimpleStore()
2384

    
2385
    # Try to read ssconf file
2386
    try:
2387
      ss.GetMasterNode()
2388
    except errors.ConfigurationError:
2389
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
2390
                                 " not part of a cluster",
2391
                                 errors.ECODE_INVAL)
2392

    
2393
    master, myself = ssconf.GetMasterAndMyself(ss=ss)
2394
    if master != myself:
2395
      raise errors.OpPrereqError("This is not the master node, please connect"
2396
                                 " to node '%s' and rerun the command" %
2397
                                 master, errors.ECODE_INVAL)
2398
    raise
2399
  return client
2400

    
2401

    
2402
def FormatError(err):
2403
  """Return a formatted error message for a given error.
2404

2405
  This function takes an exception instance and returns a tuple
2406
  consisting of two values: first, the recommended exit code, and
2407
  second, a string describing the error message (not
2408
  newline-terminated).
2409

2410
  """
2411
  retcode = 1
2412
  obuf = StringIO()
2413
  msg = str(err)
2414
  if isinstance(err, errors.ConfigurationError):
2415
    txt = "Corrupt configuration file: %s" % msg
2416
    logging.error(txt)
2417
    obuf.write(txt + "\n")
2418
    obuf.write("Aborting.")
2419
    retcode = 2
2420
  elif isinstance(err, errors.HooksAbort):
2421
    obuf.write("Failure: hooks execution failed:\n")
2422
    for node, script, out in err.args[0]:
2423
      if out:
2424
        obuf.write("  node: %s, script: %s, output: %s\n" %
2425
                   (node, script, out))
2426
      else:
2427
        obuf.write("  node: %s, script: %s (no output)\n" %
2428
                   (node, script))
2429
  elif isinstance(err, errors.HooksFailure):
2430
    obuf.write("Failure: hooks general failure: %s" % msg)
2431
  elif isinstance(err, errors.ResolverError):
2432
    this_host = netutils.Hostname.GetSysName()
2433
    if err.args[0] == this_host:
2434
      msg = "Failure: can't resolve my own hostname ('%s')"
2435
    else:
2436
      msg = "Failure: can't resolve hostname '%s'"
2437
    obuf.write(msg % err.args[0])
2438
  elif isinstance(err, errors.OpPrereqError):
2439
    if len(err.args) == 2:
2440
      obuf.write("Failure: prerequisites not met for this"
2441
                 " operation:\nerror type: %s, error details:\n%s" %
2442
                 (err.args[1], err.args[0]))
2443
    else:
2444
      obuf.write("Failure: prerequisites not met for this"
2445
                 " operation:\n%s" % msg)
2446
  elif isinstance(err, errors.OpExecError):
2447
    obuf.write("Failure: command execution error:\n%s" % msg)
2448
  elif isinstance(err, errors.TagError):
2449
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
2450
  elif isinstance(err, errors.JobQueueDrainError):
2451
    obuf.write("Failure: the job queue is marked for drain and doesn't"
2452
               " accept new requests\n")
2453
  elif isinstance(err, errors.JobQueueFull):
2454
    obuf.write("Failure: the job queue is full and doesn't accept new"
2455
               " job submissions until old jobs are archived\n")
2456
  elif isinstance(err, errors.TypeEnforcementError):
2457
    obuf.write("Parameter Error: %s" % msg)
2458
  elif isinstance(err, errors.ParameterError):
2459
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
2460
  elif isinstance(err, luxi.NoMasterError):
2461
    if err.args[0] == pathutils.MASTER_SOCKET:
2462
      daemon = "the master daemon"
2463
    elif err.args[0] == pathutils.QUERY_SOCKET:
2464
      daemon = "the config daemon"
2465
    else:
2466
      daemon = "socket '%s'" % str(err.args[0])
2467
    obuf.write("Cannot communicate with %s.\nIs the process running"
2468
               " and listening for connections?" % daemon)
2469
  elif isinstance(err, luxi.TimeoutError):
2470
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
2471
               " been submitted and will continue to run even if the call"
2472
               " timed out. Useful commands in this situation are \"gnt-job"
2473
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
2474
    obuf.write(msg)
2475
  elif isinstance(err, luxi.PermissionError):
2476
    obuf.write("It seems you don't have permissions to connect to the"
2477
               " master daemon.\nPlease retry as a different user.")
2478
  elif isinstance(err, luxi.ProtocolError):
2479
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
2480
               "%s" % msg)
2481
  elif isinstance(err, errors.JobLost):
2482
    obuf.write("Error checking job status: %s" % msg)
2483
  elif isinstance(err, errors.QueryFilterParseError):
2484
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
2485
    obuf.write("\n".join(err.GetDetails()))
2486
  elif isinstance(err, errors.GenericError):
2487
    obuf.write("Unhandled Ganeti error: %s" % msg)
2488
  elif isinstance(err, JobSubmittedException):
2489
    obuf.write("JobID: %s\n" % err.args[0])
2490
    retcode = 0
2491
  else:
2492
    obuf.write("Unhandled exception: %s" % msg)
2493
  return retcode, obuf.getvalue().rstrip("\n")
2494

    
2495

    
2496
def GenericMain(commands, override=None, aliases=None,
2497
                env_override=frozenset()):
2498
  """Generic main function for all the gnt-* commands.
2499

2500
  @param commands: a dictionary with a special structure, see the design doc
2501
                   for command line handling.
2502
  @param override: if not None, we expect a dictionary with keys that will
2503
                   override command line options; this can be used to pass
2504
                   options from the scripts to generic functions
2505
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
2506
  @param env_override: list of environment names which are allowed to submit
2507
                       default args for commands
2508

2509
  """
2510
  # save the program name and the entire command line for later logging
2511
  if sys.argv:
2512
    binary = os.path.basename(sys.argv[0])
2513
    if not binary:
2514
      binary = sys.argv[0]
2515

    
2516
    if len(sys.argv) >= 2:
2517
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
2518
    else:
2519
      logname = binary
2520

    
2521
    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
2522
  else:
2523
    binary = "<unknown program>"
2524
    cmdline = "<unknown>"
2525

    
2526
  if aliases is None:
2527
    aliases = {}
2528

    
2529
  try:
2530
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
2531
                                       env_override)
2532
  except _ShowVersion:
2533
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
2534
             constants.RELEASE_VERSION)
2535
    return constants.EXIT_SUCCESS
2536
  except _ShowUsage, err:
2537
    for line in _FormatUsage(binary, commands):
2538
      ToStdout(line)
2539

    
2540
    if err.exit_error:
2541
      return constants.EXIT_FAILURE
2542
    else:
2543
      return constants.EXIT_SUCCESS
2544
  except errors.ParameterError, err:
2545
    result, err_msg = FormatError(err)
2546
    ToStderr(err_msg)
2547
    return 1
2548

    
2549
  if func is None: # parse error
2550
    return 1
2551

    
2552
  if override is not None:
2553
    for key, val in override.iteritems():
2554
      setattr(options, key, val)
2555

    
2556
  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
2557
                     stderr_logging=True)
2558

    
2559
  logging.info("Command line: %s", cmdline)
2560

    
2561
  try:
2562
    result = func(options, args)
2563
  except (errors.GenericError, luxi.ProtocolError,
2564
          JobSubmittedException), err:
2565
    result, err_msg = FormatError(err)
2566
    logging.exception("Error during command processing")
2567
    ToStderr(err_msg)
2568
  except KeyboardInterrupt:
2569
    result = constants.EXIT_FAILURE
2570
    ToStderr("Aborted. Note that if the operation created any jobs, they"
2571
             " might have been submitted and"
2572
             " will continue to run in the background.")
2573
  except IOError, err:
2574
    if err.errno == errno.EPIPE:
2575
      # our terminal went away, we'll exit
2576
      sys.exit(constants.EXIT_FAILURE)
2577
    else:
2578
      raise
2579

    
2580
  return result
2581

    
2582

    
2583
def ParseNicOption(optvalue):
2584
  """Parses the value of the --net option(s).
2585

2586
  """
2587
  try:
2588
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
2589
  except (TypeError, ValueError), err:
2590
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
2591
                               errors.ECODE_INVAL)
2592

    
2593
  nics = [{}] * nic_max
2594
  for nidx, ndict in optvalue:
2595
    nidx = int(nidx)
2596

    
2597
    if not isinstance(ndict, dict):
2598
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
2599
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
2600

    
2601
    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
2602

    
2603
    nics[nidx] = ndict
2604

    
2605
  return nics
2606

    
2607

    
2608
def GenericInstanceCreate(mode, opts, args):
2609
  """Add an instance to the cluster via either creation or import.
2610

2611
  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
2612
  @param opts: the command line options selected by the user
2613
  @type args: list
2614
  @param args: should contain only one element, the new instance name
2615
  @rtype: int
2616
  @return: the desired exit code
2617

2618
  """
2619
  instance = args[0]
2620

    
2621
  (pnode, snode) = SplitNodeOption(opts.node)
2622

    
2623
  hypervisor = None
2624
  hvparams = {}
2625
  if opts.hypervisor:
2626
    hypervisor, hvparams = opts.hypervisor
2627

    
2628
  if opts.nics:
2629
    nics = ParseNicOption(opts.nics)
2630
  elif opts.no_nics:
2631
    # no nics
2632
    nics = []
2633
  elif mode == constants.INSTANCE_CREATE:
2634
    # default of one nic, all auto
2635
    nics = [{}]
2636
  else:
2637
    # mode == import
2638
    nics = []
2639

    
2640
  if opts.disk_template == constants.DT_DISKLESS:
2641
    if opts.disks or opts.sd_size is not None:
2642
      raise errors.OpPrereqError("Diskless instance but disk"
2643
                                 " information passed", errors.ECODE_INVAL)
2644
    disks = []
2645
  else:
2646
    if (not opts.disks and not opts.sd_size
2647
        and mode == constants.INSTANCE_CREATE):
2648
      raise errors.OpPrereqError("No disk information specified",
2649
                                 errors.ECODE_INVAL)
2650
    if opts.disks and opts.sd_size is not None:
2651
      raise errors.OpPrereqError("Please use either the '--disk' or"
2652
                                 " '-s' option", errors.ECODE_INVAL)
2653
    if opts.sd_size is not None:
2654
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
2655

    
2656
    if opts.disks:
2657
      try:
2658
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2659
      except ValueError, err:
2660
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
2661
                                   errors.ECODE_INVAL)
2662
      disks = [{}] * disk_max
2663
    else:
2664
      disks = []
2665
    for didx, ddict in opts.disks:
2666
      didx = int(didx)
2667
      if not isinstance(ddict, dict):
2668
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2669
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
2670
      elif constants.IDISK_SIZE in ddict:
2671
        if constants.IDISK_ADOPT in ddict:
2672
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2673
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
2674
        try:
2675
          ddict[constants.IDISK_SIZE] = \
2676
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
2677
        except ValueError, err:
2678
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2679
                                     (didx, err), errors.ECODE_INVAL)
2680
      elif constants.IDISK_ADOPT in ddict:
2681
        if constants.IDISK_SPINDLES in ddict:
2682
          raise errors.OpPrereqError("spindles is not a valid option when"
2683
                                     " adopting a disk", errors.ECODE_INVAL)
2684
        if mode == constants.INSTANCE_IMPORT:
2685
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
2686
                                     " import", errors.ECODE_INVAL)
2687
        ddict[constants.IDISK_SIZE] = 0
2688
      else:
2689
        raise errors.OpPrereqError("Missing size or adoption source for"
2690
                                   " disk %d" % didx, errors.ECODE_INVAL)
2691
      disks[didx] = ddict
2692

    
2693
  if opts.tags is not None:
2694
    tags = opts.tags.split(",")
2695
  else:
2696
    tags = []
2697

    
2698
  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
2699
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2700

    
2701
  if mode == constants.INSTANCE_CREATE:
2702
    start = opts.start
2703
    os_type = opts.os
2704
    force_variant = opts.force_variant
2705
    src_node = None
2706
    src_path = None
2707
    no_install = opts.no_install
2708
    identify_defaults = False
2709
  elif mode == constants.INSTANCE_IMPORT:
2710
    start = False
2711
    os_type = None
2712
    force_variant = False
2713
    src_node = opts.src_node
2714
    src_path = opts.src_dir
2715
    no_install = None
2716
    identify_defaults = opts.identify_defaults
2717
  else:
2718
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2719

    
2720
  op = opcodes.OpInstanceCreate(instance_name=instance,
2721
                                disks=disks,
2722
                                disk_template=opts.disk_template,
2723
                                nics=nics,
2724
                                conflicts_check=opts.conflicts_check,
2725
                                pnode=pnode, snode=snode,
2726
                                ip_check=opts.ip_check,
2727
                                name_check=opts.name_check,
2728
                                wait_for_sync=opts.wait_for_sync,
2729
                                file_storage_dir=opts.file_storage_dir,
2730
                                file_driver=opts.file_driver,
2731
                                iallocator=opts.iallocator,
2732
                                hypervisor=hypervisor,
2733
                                hvparams=hvparams,
2734
                                beparams=opts.beparams,
2735
                                osparams=opts.osparams,
2736
                                mode=mode,
2737
                                start=start,
2738
                                os_type=os_type,
2739
                                force_variant=force_variant,
2740
                                src_node=src_node,
2741
                                src_path=src_path,
2742
                                tags=tags,
2743
                                no_install=no_install,
2744
                                identify_defaults=identify_defaults,
2745
                                ignore_ipolicy=opts.ignore_ipolicy)
2746

    
2747
  SubmitOrSend(op, opts)
2748
  return 0
2749

    
2750

    
2751
class _RunWhileClusterStoppedHelper:
2752
  """Helper class for L{RunWhileClusterStopped} to simplify state management
2753

2754
  """
2755
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2756
    """Initializes this class.
2757

2758
    @type feedback_fn: callable
2759
    @param feedback_fn: Feedback function
2760
    @type cluster_name: string
2761
    @param cluster_name: Cluster name
2762
    @type master_node: string
2763
    @param master_node Master node name
2764
    @type online_nodes: list
2765
    @param online_nodes: List of names of online nodes
2766

2767
    """
2768
    self.feedback_fn = feedback_fn
2769
    self.cluster_name = cluster_name
2770
    self.master_node = master_node
2771
    self.online_nodes = online_nodes
2772

    
2773
    self.ssh = ssh.SshRunner(self.cluster_name)
2774

    
2775
    self.nonmaster_nodes = [name for name in online_nodes
2776
                            if name != master_node]
2777

    
2778
    assert self.master_node not in self.nonmaster_nodes
2779

    
2780
  def _RunCmd(self, node_name, cmd):
2781
    """Runs a command on the local or a remote machine.
2782

2783
    @type node_name: string
2784
    @param node_name: Machine name
2785
    @type cmd: list
2786
    @param cmd: Command
2787

2788
    """
2789
    if node_name is None or node_name == self.master_node:
2790
      # No need to use SSH
2791
      result = utils.RunCmd(cmd)
2792
    else:
2793
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
2794
                            utils.ShellQuoteArgs(cmd))
2795

    
2796
    if result.failed:
2797
      errmsg = ["Failed to run command %s" % result.cmd]
2798
      if node_name:
2799
        errmsg.append("on node %s" % node_name)
2800
      errmsg.append(": exitcode %s and error %s" %
2801
                    (result.exit_code, result.output))
2802
      raise errors.OpExecError(" ".join(errmsg))
2803

    
2804
  def Call(self, fn, *args):
2805
    """Call function while all daemons are stopped.
2806

2807
    @type fn: callable
2808
    @param fn: Function to be called
2809

2810
    """
2811
    # Pause watcher by acquiring an exclusive lock on watcher state file
2812
    self.feedback_fn("Blocking watcher")
2813
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
2814
    try:
2815
      # TODO: Currently, this just blocks. There's no timeout.
2816
      # TODO: Should it be a shared lock?
2817
      watcher_block.Exclusive(blocking=True)
2818

    
2819
      # Stop master daemons, so that no new jobs can come in and all running
2820
      # ones are finished
2821
      self.feedback_fn("Stopping master daemons")
2822
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
2823
      try:
2824
        # Stop daemons on all nodes
2825
        for node_name in self.online_nodes:
2826
          self.feedback_fn("Stopping daemons on %s" % node_name)
2827
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
2828

    
2829
        # All daemons are shut down now
2830
        try:
2831
          return fn(self, *args)
2832
        except Exception, err:
2833
          _, errmsg = FormatError(err)
2834
          logging.exception("Caught exception")
2835
          self.feedback_fn(errmsg)
2836
          raise
2837
      finally:
2838
        # Start cluster again, master node last
2839
        for node_name in self.nonmaster_nodes + [self.master_node]:
2840
          self.feedback_fn("Starting daemons on %s" % node_name)
2841
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
2842
    finally:
2843
      # Resume watcher
2844
      watcher_block.Close()
2845

    
2846

    
2847
def RunWhileClusterStopped(feedback_fn, fn, *args):
2848
  """Calls a function while all cluster daemons are stopped.
2849

2850
  @type feedback_fn: callable
2851
  @param feedback_fn: Feedback function
2852
  @type fn: callable
2853
  @param fn: Function to be called when daemons are stopped
2854

2855
  """
2856
  feedback_fn("Gathering cluster information")
2857

    
2858
  # This ensures we're running on the master daemon
2859
  cl = GetClient()
2860

    
2861
  (cluster_name, master_node) = \
2862
    cl.QueryConfigValues(["cluster_name", "master_node"])
2863

    
2864
  online_nodes = GetOnlineNodes([], cl=cl)
2865

    
2866
  # Don't keep a reference to the client. The master daemon will go away.
2867
  del cl
2868

    
2869
  assert master_node in online_nodes
2870

    
2871
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2872
                                       online_nodes).Call(fn, *args)
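
# Illustrative sketch only: how a caller would typically drive
# RunWhileClusterStopped. The callback name and message text below are made
# up for the example; note that actually calling this stops all cluster
# daemons for the duration of the callback.
def _ExampleRunWhileClusterStopped():
  """Sketch of a RunWhileClusterStopped invocation.

  """
  def _OfflineWork(helper, text):
    # The callback receives the _RunWhileClusterStoppedHelper instance as
    # its first argument; helper.feedback_fn is the feedback function that
    # was passed to RunWhileClusterStopped (ToStdout here).
    helper.feedback_fn("All daemons are down: %s" % text)
    return True

  return RunWhileClusterStopped(ToStdout, _OfflineWork, "doing offline work")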
2873

    
2874

    
2875
def GenerateTable(headers, fields, separator, data,
2876
                  numfields=None, unitfields=None,
2877
                  units=None):
2878
  """Prints a table with headers and different fields.
2879

2880
  @type headers: dict
2881
  @param headers: dictionary mapping field names to headers for
2882
      the table
2883
  @type fields: list
2884
  @param fields: the field names corresponding to each row in
2885
      the data field
2886
  @param separator: the separator to be used; if this is None,
2887
      the default 'smart' algorithm is used which computes optimal
2888
      field width, otherwise just the separator is used between
2889
      each field
2890
  @type data: list
2891
  @param data: a list of lists, each sublist being one row to be output
2892
  @type numfields: list
2893
  @param numfields: a list with the fields that hold numeric
2894
      values and thus should be right-aligned
2895
  @type unitfields: list
2896
  @param unitfields: a list with the fields that hold numeric
2897
      values that should be formatted with the units field
2898
  @type units: string or None
2899
  @param units: the units we should use for formatting, or None for
2900
      automatic choice (human-readable for non-separator usage, otherwise
2901
      megabytes); this is a one-letter string
2902

2903
  """
2904
  if units is None:
2905
    if separator:
2906
      units = "m"
2907
    else:
2908
      units = "h"
2909

    
2910
  if numfields is None:
2911
    numfields = []
2912
  if unitfields is None:
2913
    unitfields = []
2914

    
2915
  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
2916
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142
2917

    
2918
  format_fields = []
2919
  for field in fields:
2920
    if headers and field not in headers:
2921
      # TODO: handle better unknown fields (either revert to old
2922
      # style of raising exception, or deal more intelligently with
2923
      # variable fields)
2924
      headers[field] = field
2925
    if separator is not None:
2926
      format_fields.append("%s")
2927
    elif numfields.Matches(field):
2928
      format_fields.append("%*s")
2929
    else:
2930
      format_fields.append("%-*s")
2931

    
2932
  if separator is None:
2933
    mlens = [0 for name in fields]
2934
    format_str = " ".join(format_fields)
2935
  else:
2936
    format_str = separator.replace("%", "%%").join(format_fields)
2937

    
2938
  for row in data:
2939
    if row is None:
2940
      continue
2941
    for idx, val in enumerate(row):
2942
      if unitfields.Matches(fields[idx]):
2943
        try:
2944
          val = int(val)
2945
        except (TypeError, ValueError):
2946
          pass
2947
        else:
2948
          val = row[idx] = utils.FormatUnit(val, units)
2949
      val = row[idx] = str(val)
2950
      if separator is None:
2951
        mlens[idx] = max(mlens[idx], len(val))
2952

    
2953
  result = []
2954
  if headers:
2955
    args = []
2956
    for idx, name in enumerate(fields):
2957
      hdr = headers[name]
2958
      if separator is None:
2959
        mlens[idx] = max(mlens[idx], len(hdr))
2960
        args.append(mlens[idx])
2961
      args.append(hdr)
2962
    result.append(format_str % tuple(args))
2963

    
2964
  if separator is None:
2965
    assert len(mlens) == len(fields)
2966

    
2967
    if fields and not numfields.Matches(fields[-1]):
2968
      mlens[-1] = 0
2969

    
2970
  for line in data:
2971
    args = []
2972
    if line is None:
2973
      line = ["-" for _ in fields]
2974
    for idx in range(len(fields)):
2975
      if separator is None:
2976
        args.append(mlens[idx])
2977
      args.append(line[idx])
2978
    result.append(format_str % tuple(args))
2979

    
2980
  return result
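
# Illustrative sketch only: a minimal GenerateTable call. The headers, field
# names and rows below are made up for the example.
def _ExampleGenerateTable():
  """Sketch of the "smart" (separator=None) table layout.

  """
  headers = {"name": "Node", "mem": "Memory"}
  fields = ["name", "mem"]
  data = [["node1.example.com", 1024], ["node2.example.com", 2048]]
  # "mem" is declared numeric (right-aligned) and a unit field, so with
  # units="h" the raw megabyte values are rendered human-readable.
  return GenerateTable(headers, fields, None, data,
                       numfields=["mem"], unitfields=["mem"], units="h")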
2981

    
2982

    
2983
def _FormatBool(value):
2984
  """Formats a boolean value as a string.
2985

2986
  """
2987
  if value:
2988
    return "Y"
2989
  return "N"
2990

    
2991

    
2992
#: Default formatting for query results; (callback, align right)
2993
_DEFAULT_FORMAT_QUERY = {
2994
  constants.QFT_TEXT: (str, False),
2995
  constants.QFT_BOOL: (_FormatBool, False),
2996
  constants.QFT_NUMBER: (str, True),
2997
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2998
  constants.QFT_OTHER: (str, False),
2999
  constants.QFT_UNKNOWN: (str, False),
3000
  }
3001

    
3002

    
3003
def _GetColumnFormatter(fdef, override, unit):
3004
  """Returns formatting function for a field.
3005

3006
  @type fdef: L{objects.QueryFieldDefinition}
3007
  @type override: dict
3008
  @param override: Dictionary for overriding field formatting functions,
3009
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3010
  @type unit: string
3011
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
3012
  @rtype: tuple; (callable, bool)
3013
  @return: Returns the function to format a value (takes one parameter) and a
3014
    boolean for aligning the value on the right-hand side
3015

3016
  """
3017
  fmt = override.get(fdef.name, None)
3018
  if fmt is not None:
3019
    return fmt
3020

    
3021
  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY
3022

    
3023
  if fdef.kind == constants.QFT_UNIT:
3024
    # Can't keep this information in the static dictionary
3025
    return (lambda value: utils.FormatUnit(value, unit), True)
3026

    
3027
  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
3028
  if fmt is not None:
3029
    return fmt
3030

    
3031
  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3032

    
3033

    
3034
class _QueryColumnFormatter:
3035
  """Callable class for formatting fields of a query.
3036

3037
  """
3038
  def __init__(self, fn, status_fn, verbose):
3039
    """Initializes this class.
3040

3041
    @type fn: callable
3042
    @param fn: Formatting function
3043
    @type status_fn: callable
3044
    @param status_fn: Function to report fields' status
3045
    @type verbose: boolean
3046
    @param verbose: whether to use verbose field descriptions or not
3047

3048
    """
3049
    self._fn = fn
3050
    self._status_fn = status_fn
3051
    self._verbose = verbose
3052

    
3053
  def __call__(self, data):
3054
    """Returns a field's string representation.
3055

3056
    """
3057
    (status, value) = data
3058

    
3059
    # Report status
3060
    self._status_fn(status)
3061

    
3062
    if status == constants.RS_NORMAL:
3063
      return self._fn(value)
3064

    
3065
    assert value is None, \
3066
           "Found value %r for abnormal status %s" % (value, status)
3067

    
3068
    return FormatResultError(status, self._verbose)
3069

    
3070

    
3071
def FormatResultError(status, verbose):
3072
  """Formats result status other than L{constants.RS_NORMAL}.
3073

3074
  @param status: The result status
3075
  @type verbose: boolean
3076
  @param verbose: Whether to return the verbose text
3077
  @return: Text of result status
3078

3079
  """
3080
  assert status != constants.RS_NORMAL, \
3081
         "FormatResultError called with status equal to constants.RS_NORMAL"
3082
  try:
3083
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
3084
  except KeyError:
3085
    raise NotImplementedError("Unknown status %s" % status)
3086
  else:
3087
    if verbose:
3088
      return verbose_text
3089
    return normal_text
3090

    
3091

    
3092
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
3093
                      header=False, verbose=False):
3094
  """Formats data in L{objects.QueryResponse}.
3095

3096
  @type result: L{objects.QueryResponse}
3097
  @param result: result of query operation
3098
  @type unit: string
3099
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
3100
    see L{utils.text.FormatUnit}
3101
  @type format_override: dict
3102
  @param format_override: Dictionary for overriding field formatting functions,
3103
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3104
  @type separator: string or None
3105
  @param separator: String used to separate fields
3106
  @type header: bool
3107
  @param header: Whether to output header row
3108
  @type verbose: boolean
3109
  @param verbose: whether to use verbose field descriptions or not
3110

3111
  """
3112
  if unit is None:
3113
    if separator:
3114
      unit = "m"
3115
    else:
3116
      unit = "h"
3117

    
3118
  if format_override is None:
3119
    format_override = {}
3120

    
3121
  stats = dict.fromkeys(constants.RS_ALL, 0)
3122

    
3123
  def _RecordStatus(status):
3124
    if status in stats:
3125
      stats[status] += 1
3126

    
3127
  columns = []
3128
  for fdef in result.fields:
3129
    assert fdef.title and fdef.name
3130
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
3131
    columns.append(TableColumn(fdef.title,
3132
                               _QueryColumnFormatter(fn, _RecordStatus,
3133
                                                     verbose),
3134
                               align_right))
3135

    
3136
  table = FormatTable(result.data, columns, header, separator)
3137

    
3138
  # Collect statistics
3139
  assert len(stats) == len(constants.RS_ALL)
3140
  assert compat.all(count >= 0 for count in stats.values())
3141

    
3142
  # Determine overall status. If there was no data, unknown fields must be
3143
  # detected via the field definitions.
3144
  if (stats[constants.RS_UNKNOWN] or
3145
      (not result.data and _GetUnknownFields(result.fields))):
3146
    status = QR_UNKNOWN
3147
  elif compat.any(count > 0 for key, count in stats.items()
3148
                  if key != constants.RS_NORMAL):
3149
    status = QR_INCOMPLETE
3150
  else:
3151
    status = QR_NORMAL
3152

    
3153
  return (status, table)
3154

    
3155

    
3156
def _GetUnknownFields(fdefs):
3157
  """Returns list of unknown fields included in C{fdefs}.
3158

3159
  @type fdefs: list of L{objects.QueryFieldDefinition}
3160

3161
  """
3162
  return [fdef for fdef in fdefs
3163
          if fdef.kind == constants.QFT_UNKNOWN]
3164

    
3165

    
3166
def _WarnUnknownFields(fdefs):
3167
  """Prints a warning to stderr if a query included unknown fields.
3168

3169
  @type fdefs: list of L{objects.QueryFieldDefinition}
3170

3171
  """
3172
  unknown = _GetUnknownFields(fdefs)
3173
  if unknown:
3174
    ToStderr("Warning: Queried for unknown fields %s",
3175
             utils.CommaJoin(fdef.name for fdef in unknown))
3176
    return True
3177

    
3178
  return False
3179

    
3180

    
3181
def GenericList(resource, fields, names, unit, separator, header, cl=None,
3182
                format_override=None, verbose=False, force_filter=False,
3183
                namefield=None, qfilter=None, isnumeric=False):
3184
  """Generic implementation for listing all items of a resource.
3185

3186
  @param resource: One of L{constants.QR_VIA_LUXI}
3187
  @type fields: list of strings
3188
  @param fields: List of fields to query for
3189
  @type names: list of strings
3190
  @param names: Names of items to query for
3191
  @type unit: string or None
3192
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
3193
    None for automatic choice (human-readable for non-separator usage,
3194
    otherwise megabytes); this is a one-letter string
3195
  @type separator: string or None
3196
  @param separator: String used to separate fields
3197
  @type header: bool
3198
  @param header: Whether to show header row
3199
  @type force_filter: bool
3200
  @param force_filter: Whether to always treat names as filter
3201
  @type format_override: dict
3202
  @param format_override: Dictionary for overriding field formatting functions,
3203
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
3204
  @type verbose: boolean
3205
  @param verbose: whether to use verbose field descriptions or not
3206
  @type namefield: string
3207
  @param namefield: Name of field to use for simple filters (see
3208
    L{qlang.MakeFilter} for details)
3209
  @type qfilter: list or None
3210
  @param qfilter: Query filter (in addition to names)
3211
  @type isnumeric: bool
3212
  @param isnumeric: Whether the namefield's type is numeric, and therefore
3213
    any simple filters built by namefield should use integer values to
3214
    reflect that
3215

3216
  """
3217
  if not names:
3218
    names = None
3219

    
3220
  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
3221
                                isnumeric=isnumeric)
3222

    
3223
  if qfilter is None:
3224
    qfilter = namefilter
3225
  elif namefilter is not None:
3226
    qfilter = [qlang.OP_AND, namefilter, qfilter]
3227

    
3228
  if cl is None:
3229
    cl = GetClient()
3230

    
3231
  response = cl.Query(resource, fields, qfilter)
3232

    
3233
  found_unknown = _WarnUnknownFields(response.fields)
3234

    
3235
  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
3236
                                     header=header,
3237
                                     format_override=format_override,
3238
                                     verbose=verbose)
3239

    
3240
  for line in data:
3241
    ToStdout(line)
3242

    
3243
  assert ((found_unknown and status == QR_UNKNOWN) or
3244
          (not found_unknown and status != QR_UNKNOWN))
3245

    
3246
  if status == QR_UNKNOWN:
3247
    return constants.EXIT_UNKNOWN_FIELD
3248

    
3249
  # TODO: Should the list command fail if not all data could be collected?
3250
  return constants.EXIT_SUCCESS
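
# Illustrative sketch only: a GenericList call roughly equivalent to a plain
# "gnt-node list". The field names are assumed to be valid node query fields.
def _ExampleGenericList():
  """Sketch of listing all nodes with a few fields.

  """
  # An empty name list means "all items"; unit=None and separator=None
  # select human-readable units and the aligned table layout.
  return GenericList(constants.QR_NODE, ["name", "mtotal", "mfree"], [],
                     None, None, True, namefield="name")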
3251

    
3252

    
3253
def _FieldDescValues(fdef):
3254
  """Helper function for L{GenericListFields} to get query field description.
3255

3256
  @type fdef: L{objects.QueryFieldDefinition}
3257
  @rtype: list
3258

3259
  """
3260
  return [
3261
    fdef.name,
3262
    _QFT_NAMES.get(fdef.kind, fdef.kind),
3263
    fdef.title,
3264
    fdef.doc,
3265
    ]
3266

    
3267

    
3268
def GenericListFields(resource, fields, separator, header, cl=None):
3269
  """Generic implementation for listing fields for a resource.
3270

3271
  @param resource: One of L{constants.QR_VIA_LUXI}
3272
  @type fields: list of strings
3273
  @param fields: List of fields to query for
3274
  @type separator: string or None
3275
  @param separator: String used to separate fields
3276
  @type header: bool
3277
  @param header: Whether to show header row
3278

3279
  """
3280
  if cl is None:
3281
    cl = GetClient()
3282

    
3283
  if not fields:
3284
    fields = None
3285

    
3286
  response = cl.QueryFields(resource, fields)
3287

    
3288
  found_unknown = _WarnUnknownFields(response.fields)
3289

    
3290
  columns = [
3291
    TableColumn("Name", str, False),
3292
    TableColumn("Type", str, False),
3293
    TableColumn("Title", str, False),
3294
    TableColumn("Description", str, False),
3295
    ]
3296

    
3297
  rows = map(_FieldDescValues, response.fields)
3298

    
3299
  for line in FormatTable(rows, columns, header, separator):
3300
    ToStdout(line)
3301

    
3302
  if found_unknown:
3303
    return constants.EXIT_UNKNOWN_FIELD
3304

    
3305
  return constants.EXIT_SUCCESS
3306

    
3307

    
3308
class TableColumn:
3309
  """Describes a column for L{FormatTable}.
3310

3311
  """
3312
  def __init__(self, title, fn, align_right):
3313
    """Initializes this class.
3314

3315
    @type title: string
3316
    @param title: Column title
3317
    @type fn: callable
3318
    @param fn: Formatting function
3319
    @type align_right: bool
3320
    @param align_right: Whether to align values on the right-hand side
3321

3322
    """
3323
    self.title = title
3324
    self.format = fn
3325
    self.align_right = align_right
3326

    
3327

    
3328
def _GetColFormatString(width, align_right):
3329
  """Returns the format string for a field.
3330

3331
  """
3332
  if align_right:
3333
    sign = ""
3334
  else:
3335
    sign = "-"
3336

    
3337
  return "%%%s%ss" % (sign, width)
3338

    
3339

    
3340
def FormatTable(rows, columns, header, separator):
3341
  """Formats data as a table.
3342

3343
  @type rows: list of lists
3344
  @param rows: Row data, one list per row
3345
  @type columns: list of L{TableColumn}
3346
  @param columns: Column descriptions
3347
  @type header: bool
3348
  @param header: Whether to show header row
3349
  @type separator: string or None
3350
  @param separator: String used to separate columns
3351

3352
  """
3353
  if header:
3354
    data = [[col.title for col in columns]]
3355
    colwidth = [len(col.title) for col in columns]
3356
  else:
3357
    data = []
3358
    colwidth = [0 for _ in columns]
3359

    
3360
  # Format row data
3361
  for row in rows:
3362
    assert len(row) == len(columns)
3363

    
3364
    formatted = [col.format(value) for value, col in zip(row, columns)]
3365

    
3366
    if separator is None:
3367
      # Update column widths
3368
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
3369
        # Modifying a list's items while iterating is fine
3370
        colwidth[idx] = max(oldwidth, len(value))
3371

    
3372
    data.append(formatted)
3373

    
3374
  if separator is not None:
3375
    # Return early if a separator is used
3376
    return [separator.join(row) for row in data]
3377

    
3378
  if columns and not columns[-1].align_right:
3379
    # Avoid unnecessary spaces at end of line
3380
    colwidth[-1] = 0
3381

    
3382
  # Build format string
3383
  fmt = " ".join([_GetColFormatString(width, col.align_right)
3384
                  for col, width in zip(columns, colwidth)])
3385

    
3386
  return [fmt % tuple(row) for row in data]
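
# Illustrative sketch only: FormatTable driven directly with TableColumn
# objects; the column titles and row data below are made up for the example.
def _ExampleFormatTable():
  """Sketch of FormatTable with one left- and one right-aligned column.

  """
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Memory", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["node1.example.com", 1024], ["node2.example.com", 2048]]
  # header=True prepends the titles; separator=None selects the
  # width-aligned layout built via _GetColFormatString.
  return FormatTable(rows, columns, True, None)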
3387

    
3388

    
3389
def FormatTimestamp(ts):
3390
  """Formats a given timestamp.
3391

3392
  @type ts: timestamp
3393
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
3394

3395
  @rtype: string
3396
  @return: a string with the formatted timestamp
3397

3398
  """
3399
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
3400
    return "?"
3401

    
3402
  (sec, usecs) = ts
3403
  return utils.FormatTime(sec, usecs=usecs)
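
# Illustrative sketch only: the values below are made up and merely show the
# accepted input shape.
def _ExampleFormatTimestamp():
  """Sketch of FormatTimestamp input handling.

  """
  # A (seconds, microseconds) pair, as found in job/opcode timestamps, is
  # formatted via utils.FormatTime ...
  good = FormatTimestamp((1234567890, 250000))
  # ... while anything else (None, a bare number, a wrong-sized tuple) is
  # rendered as "?".
  bad = FormatTimestamp(None)
  return (good, bad)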
3404

    
3405

    
3406
def ParseTimespec(value):
3407
  """Parse a time specification.
3408

3409
  The following suffixes will be recognized:
3410

3411
    - s: seconds
3412
    - m: minutes
3413
    - h: hours
3414
    - d: days
3415
    - w: weeks
3416

3417
  Without any suffix, the value will be taken to be in seconds.
3418

3419
  """
3420
  value = str(value)
3421
  if not value:
3422
    raise errors.OpPrereqError("Empty time specification passed",
3423
                               errors.ECODE_INVAL)
3424
  suffix_map = {
3425
    "s": 1,
3426
    "m": 60,
3427
    "h": 3600,
3428
    "d": 86400,
3429
    "w": 604800,
3430
    }
3431
  if value[-1] not in suffix_map:
3432
    try:
3433
      value = int(value)
3434
    except (TypeError, ValueError):
3435
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3436
                                 errors.ECODE_INVAL)
3437
  else:
3438
    multiplier = suffix_map[value[-1]]
3439
    value = value[:-1]
3440
    if not value: # no data left after stripping the suffix
3441
      raise errors.OpPrereqError("Invalid time specification (only"
3442
                                 " suffix passed)", errors.ECODE_INVAL)
3443
    try:
3444
      value = int(value) * multiplier
3445
    except (TypeError, ValueError):
3446
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
3447
                                 errors.ECODE_INVAL)
3448
  return value
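
# Illustrative sketch only: a few ParseTimespec inputs and the number of
# seconds they translate to.
def _ExampleParseTimespec():
  """Sketch of ParseTimespec results.

  """
  # "30" has no suffix and is taken as seconds; "2h" and "1w" are scaled by
  # the suffix map above (2 * 3600 = 7200 and 1 * 604800 respectively).
  return [ParseTimespec("30"), ParseTimespec("2h"), ParseTimespec("1w")]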
3449

    
3450

    
3451
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
3452
                   filter_master=False, nodegroup=None):
3453
  """Returns the names of online nodes.
3454

3455
  This function will also log a warning on stderr with the names of
3456
  the offline nodes that are skipped.
3457

3458
  @param nodes: if not empty, use only this subset of nodes (minus the
3459
      offline ones)
3460
  @param cl: if not None, luxi client to use
3461
  @type nowarn: boolean
3462
  @param nowarn: by default, this function will output a note with the
3463
      offline nodes that are skipped; if this parameter is True the
3464
      note is not displayed
3465
  @type secondary_ips: boolean
3466
  @param secondary_ips: if True, return the secondary IPs instead of the
3467
      names, useful for doing network traffic over the replication interface
3468
      (if any)
3469
  @type filter_master: boolean
3470
  @param filter_master: if True, do not return the master node in the list
3471
      (useful in coordination with secondary_ips where we cannot check our
3472
      node name against the list)
3473
  @type nodegroup: string
3474
  @param nodegroup: If set, only return nodes in this node group
3475

3476
  """
3477
  if cl is None:
3478
    cl = GetClient()
3479

    
3480
  qfilter = []
3481

    
3482
  if nodes:
3483
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))
3484

    
3485
  if nodegroup is not None:
3486
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
3487
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])
3488

    
3489
  if filter_master:
3490
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])
3491

    
3492
  if qfilter:
3493
    if len(qfilter) > 1:
3494
      final_filter = [qlang.OP_AND] + qfilter
3495
    else:
3496
      assert len(qfilter) == 1
3497
      final_filter = qfilter[0]
3498
  else:
3499
    final_filter = None
3500

    
3501
  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)
3502

    
3503
  def _IsOffline(row):
3504
    (_, (_, offline), _) = row
3505
    return offline
3506

    
3507
  def _GetName(row):
3508
    ((_, name), _, _) = row
3509
    return name
3510

    
3511
  def _GetSip(row):
3512
    (_, _, (_, sip)) = row
3513
    return sip
3514

    
3515
  (offline, online) = compat.partition(result.data, _IsOffline)
3516

    
3517
  if offline and not nowarn:
3518
    ToStderr("Note: skipping offline node(s): %s" %
3519
             utils.CommaJoin(map(_GetName, offline)))
3520

    
3521
  if secondary_ips:
3522
    fn = _GetSip
3523
  else:
3524
    fn = _GetName
3525

    
3526
  return map(fn, online)
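
# Illustrative sketch only: fetching the secondary IPs of all online nodes
# except the master, e.g. for traffic over the replication network. The
# keyword arguments match the signature above.
def _ExampleGetOnlineNodes():
  """Sketch of a GetOnlineNodes call.

  """
  # nowarn=True suppresses the "skipping offline node(s)" note.
  return GetOnlineNodes([], nowarn=True, secondary_ips=True,
                        filter_master=True)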
3527

    
3528

    
3529
def _ToStream(stream, txt, *args):
3530
  """Write a message to a stream, bypassing the logging system
3531

3532
  @type stream: file object
3533
  @param stream: the file to which we should write
3534
  @type txt: str
3535
  @param txt: the message
3536

3537
  """
3538
  try:
3539
    if args:
3540
      args = tuple(args)
3541
      stream.write(txt % args)
3542
    else:
3543
      stream.write(txt)
3544
    stream.write("\n")
3545
    stream.flush()
3546
  except IOError, err:
3547
    if err.errno == errno.EPIPE:
3548
      # our terminal went away, we'll exit
3549
      sys.exit(constants.EXIT_FAILURE)
3550
    else:
3551
      raise
3552

    
3553

    
3554
def ToStdout(txt, *args):
3555
  """Write a message to stdout only, bypassing the logging system
3556

3557
  This is just a wrapper over _ToStream.
3558

3559
  @type txt: str
3560
  @param txt: the message
3561

3562
  """
3563
  _ToStream(sys.stdout, txt, *args)
3564

    
3565

    
3566
def ToStderr(txt, *args):
3567
  """Write a message to stderr only, bypassing the logging system
3568

3569
  This is just a wrapper over _ToStream.
3570

3571
  @type txt: str
3572
  @param txt: the message
3573

3574
  """
3575
  _ToStream(sys.stderr, txt, *args)
3576

    
3577

    
3578
class JobExecutor(object):
3579
  """Class which manages the submission and execution of multiple jobs.
3580

3581
  Note that instances of this class should not be reused between
3582
  GetResults() calls.
3583

3584
  """
3585
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3586
    self.queue = []
3587
    if cl is None:
3588
      cl = GetClient()
3589
    self.cl = cl
3590
    self.verbose = verbose
3591
    self.jobs = []
3592
    self.opts = opts
3593
    self.feedback_fn = feedback_fn
3594
    self._counter = itertools.count()
3595

    
3596
  @staticmethod
3597
  def _IfName(name, fmt):
3598
    """Helper function for formatting name.
3599

3600
    """
3601
    if name:
3602
      return fmt % name
3603

    
3604
    return ""
3605

    
3606
  def QueueJob(self, name, *ops):
3607
    """Record a job for later submit.
3608

3609
    @type name: string
3610
    @param name: a description of the job, will be used in WaitJobSet
3611

3612
    """
3613
    SetGenericOpcodeOpts(ops, self.opts)
3614
    self.queue.append((self._counter.next(), name, ops))
3615

    
3616
  def AddJobId(self, name, status, job_id):
3617
    """Adds a job ID to the internal queue.
3618

3619
    """
3620
    self.jobs.append((self._counter.next(), status, job_id, name))
3621

    
3622
  def SubmitPending(self, each=False):
3623
    """Submit all pending jobs.
3624

3625
    """
3626
    if each:
3627
      results = []
3628
      for (_, _, ops) in self.queue:
3629
        # SubmitJob will remove the success status, but raise an exception if
3630
        # the submission fails, so we'll notice that anyway.
3631
        results.append([True, self.cl.SubmitJob(ops)[0]])
3632
    else:
3633
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
3634
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
3635
      self.jobs.append((idx, status, data, name))
3636

    
3637
  def _ChooseJob(self):
3638
    """Choose a non-waiting/queued job to poll next.
3639

3640
    """
3641
    assert self.jobs, "_ChooseJob called with empty job list"
3642

    
3643
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
3644
                               ["status"])
3645
    assert result
3646

    
3647
    for job_data, status in zip(self.jobs, result):
3648
      if (isinstance(status, list) and status and
3649
          status[0] in (constants.JOB_STATUS_QUEUED,
3650
                        constants.JOB_STATUS_WAITING,
3651
                        constants.JOB_STATUS_CANCELING)):
3652
        # job is still present and waiting
3653
        continue
3654
      # good candidate found (either running job or lost job)
3655
      self.jobs.remove(job_data)
3656
      return job_data
3657

    
3658
    # no job found
3659
    return self.jobs.pop(0)
3660

    
3661
  def GetResults(self):
3662
    """Wait for and return the results of all jobs.
3663

3664
    @rtype: list
3665
    @return: list of tuples (success, job results), in the same order
3666
        as the submitted jobs; if a job has failed, instead of the result
3667
        there will be the error message
3668

3669
    """
3670
    if not self.jobs:
3671
      self.SubmitPending()
3672
    results = []
3673
    if self.verbose:
3674
      ok_jobs = [row[2] for row in self.jobs if row[1]]
3675
      if ok_jobs:
3676
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
3677

    
3678
    # first, remove any non-submitted jobs
3679
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
3680
    for idx, _, jid, name in failures:
3681
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
3682
      results.append((idx, False, jid))
3683

    
3684
    while self.jobs:
3685
      (idx, _, jid, name) = self._ChooseJob()
3686
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
3687
      try:
3688
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
3689
        success = True
3690
      except errors.JobLost, err:
3691
        _, job_result = FormatError(err)
3692
        ToStderr("Job %s%s has been archived, cannot check its result",
3693
                 jid, self._IfName(name, " for %s"))
3694
        success = False
3695
      except (errors.GenericError, luxi.ProtocolError), err:
3696
        _, job_result = FormatError(err)
3697
        success = False
3698
        # the error message will always be shown, verbose or not
3699
        ToStderr("Job %s%s has failed: %s",
3700
                 jid, self._IfName(name, " for %s"), job_result)
3701

    
3702
      results.append((idx, success, job_result))
3703

    
3704
    # sort based on the index, then drop it
3705
    results.sort()
3706
    results = [i[1:] for i in results]
3707

    
3708
    return results
3709

    
3710
  def WaitOrShow(self, wait):
3711
    """Wait for job results or only print the job IDs.
3712

3713
    @type wait: boolean
3714
    @param wait: whether to wait or not
3715

3716
    """
3717
    if wait:
3718
      return self.GetResults()
3719
    else:
3720
      if not self.jobs:
3721
        self.SubmitPending()
3722
      for _, status, result, name in self.jobs:
3723
        if status:
3724
          ToStdout("%s: %s", result, name)
3725
        else:
3726
          ToStderr("Failure for %s: %s", name, result)
3727
      return [row[1:3] for row in self.jobs]
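
# Illustrative sketch only: the usual JobExecutor flow. "opts" is assumed to
# be the parsed command-line options object, and OpInstanceStartup is merely
# one example of a per-instance opcode; the instance names are made up.
def _ExampleJobExecutor(opts):
  """Sketch of queueing several jobs and waiting for their results.

  """
  jex = JobExecutor(opts=opts)
  for name in ("inst1.example.com", "inst2.example.com"):
    jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
  # GetResults() submits anything still pending, polls the jobs and returns
  # (success, result) pairs in submission order.
  return jex.GetResults()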
3728

    
3729

    
3730
def FormatParamsDictInfo(param_dict, actual):
3731
  """Formats a parameter dictionary.
3732

3733
  @type param_dict: dict
3734
  @param param_dict: the object's own (explicitly set) parameters
3735
  @type actual: dict
3736
  @param actual: the current parameter set (including defaults)
3737
  @rtype: dict
3738
  @return: dictionary where the value of each parameter is either a fully
3739
      formatted string or a dictionary containing formatted strings
3740

3741
  """
3742
  ret = {}
3743
  for (key, data) in actual.items():
3744
    if isinstance(data, dict) and data:
3745
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
3746
    else:
3747
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
3748
  return ret
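
# Illustrative sketch only: the parameter names and values below are made up.
def _ExampleFormatParamsDictInfo():
  """Sketch of how own and inherited parameters are rendered.

  """
  own = {"vcpus": 2}
  actual = {"vcpus": 2, "memory": 512}
  # "vcpus" is set explicitly and comes out as "2"; "memory" only exists in
  # the effective set and comes out as "default (512)".
  return FormatParamsDictInfo(own, actual)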
3749

    
3750

    
3751
def _FormatListInfoDefault(data, def_data):
3752
  if data is not None:
3753
    ret = utils.CommaJoin(data)
3754
  else:
3755
    ret = "default (%s)" % utils.CommaJoin(def_data)
3756
  return ret
3757

    
3758

    
3759
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
3760
  """Formats an instance policy.
3761

3762
  @type custom_ipolicy: dict
3763
  @param custom_ipolicy: own policy
3764
  @type eff_ipolicy: dict
3765
  @param eff_ipolicy: effective policy (including defaults); ignored for
3766
      cluster
3767
  @type iscluster: bool
3768
  @param iscluster: the policy is at cluster level
3769
  @rtype: list of pairs
3770
  @return: formatted data, suitable for L{PrintGenericInfo}
3771

3772
  """
3773
  if iscluster:
3774
    eff_ipolicy = custom_ipolicy
3775

    
3776
  minmax_out = []
3777
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
3778
  if custom_minmax:
3779
    for (k, minmax) in enumerate(custom_minmax):
3780
      minmax_out.append([
3781
        ("%s/%s" % (key, k),
3782
         FormatParamsDictInfo(minmax[key], minmax[key]))
3783
        for key in constants.ISPECS_MINMAX_KEYS
3784
        ])
3785
  else:
3786
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
3787
      minmax_out.append([
3788
        ("%s/%s" % (key, k),
3789
         FormatParamsDictInfo({}, minmax[key]))
3790
        for key in constants.ISPECS_MINMAX_KEYS
3791
        ])
3792
  ret = [("bounds specs", minmax_out)]
3793

    
3794
  if iscluster:
3795
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
3796
    ret.append(
3797
      (constants.ISPECS_STD,
3798
       FormatParamsDictInfo(stdspecs, stdspecs))
3799
      )
3800

    
3801
  ret.append(
3802
    ("allowed disk templates",
3803
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
3804
                            eff_ipolicy[constants.IPOLICY_DTS]))
3805
    )
3806
  ret.extend([
3807
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
3808
    for key in constants.IPOLICY_PARAMETERS
3809
    ])
3810
  return ret
3811

    
3812

    
3813
def _PrintSpecsParameters(buf, specs):
3814
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
3815
  buf.write(",".join(values))
3816

    
3817

    
3818
def PrintIPolicyCommand(buf, ipolicy, isgroup):
3819
  """Print the command option used to generate the given instance policy.
3820

3821
  Currently only the parts dealing with specs are supported.
3822

3823
  @type buf: StringIO
3824
  @param buf: stream to write into
3825
  @type ipolicy: dict
3826
  @param ipolicy: instance policy
3827
  @type isgroup: bool
3828
  @param isgroup: whether the policy is at group level
3829

3830
  """
3831
  if not isgroup:
3832
    stdspecs = ipolicy.get("std")
3833
    if stdspecs:
3834
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
3835
      _PrintSpecsParameters(buf, stdspecs)
3836
  minmaxes = ipolicy.get("minmax", [])
3837
  first = True
3838
  for minmax in minmaxes:
3839
    minspecs = minmax.get("min")
3840
    maxspecs = minmax.get("max")
3841
    if minspecs and maxspecs:
3842
      if first:
3843
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
3844
        first = False
3845
      else:
3846
        buf.write("//")
3847
      buf.write("min:")
3848
      _PrintSpecsParameters(buf, minspecs)
3849
      buf.write("/max:")
3850
      _PrintSpecsParameters(buf, maxspecs)
3851

    
3852

    
3853
def ConfirmOperation(names, list_type, text, extra=""):
3854
  """Ask the user to confirm an operation on a list of list_type.
3855

3856
  This function is used to request confirmation for doing an operation
3857
  on a given list of list_type.
3858

3859
  @type names: list
3860
  @param names: the list of names that we display when
3861
      we ask for confirmation
3862
  @type list_type: str
3863
  @param list_type: Human readable name for elements in the list (e.g. nodes)
3864
  @type text: str
3865
  @param text: the operation that the user should confirm
3866
  @rtype: boolean
3867
  @return: True or False depending on user's confirmation.
3868

3869
  """
3870
  count = len(names)
3871
  msg = ("The %s will operate on %d %s.\n%s"
3872
         "Do you want to continue?" % (text, count, list_type, extra))
3873
  affected = (("\nAffected %s:\n" % list_type) +
3874
              "\n".join(["  %s" % name for name in names]))
3875

    
3876
  choices = [("y", True, "Yes, execute the %s" % text),
3877
             ("n", False, "No, abort the %s" % text)]
3878

    
3879
  if count > 20:
3880
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
3881
    question = msg
3882
  else:
3883
    question = msg + affected
3884

    
3885
  choice = AskUser(question, choices)
3886
  if choice == "v":
3887
    choices.pop(1)
3888
    choice = AskUser(msg + affected, choices)
3889
  return choice
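
# Illustrative sketch only: asking for confirmation before an operation on a
# couple of instances; the names and operation text below are made up.
def _ExampleConfirmOperation():
  """Sketch of a ConfirmOperation call.

  """
  names = ["inst1.example.com", "inst2.example.com"]
  # Produces "The removal will operate on 2 instances. ..." and returns the
  # user's choice; with more than 20 names a "view list" option is offered.
  return ConfirmOperation(names, "instances", "removal")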
3890

    
3891

    
3892
def _MaybeParseUnit(elements):
3893
  """Parses and returns an array of potential values with units.
3894

3895
  """
3896
  parsed = {}
3897
  for k, v in elements.items():
3898
    if v == constants.VALUE_DEFAULT:
3899
      parsed[k] = v
3900
    else:
3901
      parsed[k] = utils.ParseUnit(v)
3902
  return parsed
3903

    
3904

    
3905
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
3906
                             ispecs_disk_count, ispecs_disk_size,
3907
                             ispecs_nic_count, group_ipolicy, fill_all):
3908
  try:
3909
    if ispecs_mem_size:
3910
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
3911
    if ispecs_disk_size:
3912
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
3913
  except (TypeError, ValueError, errors.UnitParseError), err:
3914
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
3915
                               " in policy: %s" %
3916
                               (ispecs_disk_size, ispecs_mem_size, err),
3917
                               errors.ECODE_INVAL)
3918

    
3919
  # prepare ipolicy dict
3920
  ispecs_transposed = {
3921
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
3922
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
3923
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
3924
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
3925
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
3926
    }
3927

    
3928
  # first, check that the values given are correct
3929
  if group_ipolicy:
3930
    forced_type = TISPECS_GROUP_TYPES
3931
  else:
3932
    forced_type = TISPECS_CLUSTER_TYPES
3933
  for specs in ispecs_transposed.values():
3934
    assert type(specs) is dict
3935
    utils.ForceDictType(specs, forced_type)
3936

    
3937
  # then transpose
3938
  ispecs = {
3939
    constants.ISPECS_MIN: {},
3940
    constants.ISPECS_MAX: {},
3941
    constants.ISPECS_STD: {},
3942
    }
3943
  for (name, specs) in ispecs_transposed.iteritems():
3944
    assert name in constants.ISPECS_PARAMETERS
3945
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
3946
      assert key in ispecs
3947
      ispecs[key][name] = val
3948
  minmax_out = {}
3949
  for key in constants.ISPECS_MINMAX_KEYS:
3950
    if fill_all:
3951
      minmax_out[key] = \
3952
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
3953
    else:
3954
      minmax_out[key] = ispecs[key]
3955
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
3956
  if fill_all:
3957
    ipolicy[constants.ISPECS_STD] = \
3958
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
3959
                         ispecs[constants.ISPECS_STD])
3960
  else:
3961
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3962

    
3963

    
3964
def _ParseSpecUnit(spec, keyname):
3965
  ret = spec.copy()
3966
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
3967
    if k in ret:
3968
      try:
3969
        ret[k] = utils.ParseUnit(ret[k])
3970
      except (TypeError, ValueError, errors.UnitParseError), err:
3971
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
3972
                                    " specs: %s" % (k, ret[k], keyname, err)),
3973
                                   errors.ECODE_INVAL)
3974
  return ret
3975

    
3976

    
3977
def _ParseISpec(spec, keyname, required):
3978
  ret = _ParseSpecUnit(spec, keyname)
3979
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
3980
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
3981
  if required and missing:
3982
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
3983
                               (keyname, utils.CommaJoin(missing)),
3984
                               errors.ECODE_INVAL)
3985
  return ret
3986

    
3987

    
3988
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
3989
  ret = None
3990
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
3991
      len(minmax_ispecs[0]) == 1):
3992
    for (key, spec) in minmax_ispecs[0].items():
3993
      # This loop is executed exactly once
3994
      if key in allowed_values and not spec:
3995
        ret = key
3996
  return ret
3997

    
3998

    
3999
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4000
                            group_ipolicy, allowed_values):
4001
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
4002
  if found_allowed is not None:
4003
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
4004
  elif minmax_ispecs is not None:
4005
    minmax_out = []
4006
    for mmpair in minmax_ispecs:
4007
      mmpair_out = {}
4008
      for (key, spec) in mmpair.items():
4009
        if key not in constants.ISPECS_MINMAX_KEYS:
4010
          msg = "Invalid key in bounds instance specifications: %s" % key
4011
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
4012
        mmpair_out[key] = _ParseISpec(spec, key, True)
4013
      minmax_out.append(mmpair_out)
4014
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
4015
  if std_ispecs is not None:
4016
    assert not group_ipolicy # This is not an option for gnt-group
4017
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4018

    
4019

    
4020
def CreateIPolicyFromOpts(ispecs_mem_size=None,
4021
                          ispecs_cpu_count=None,
4022
                          ispecs_disk_count=None,
4023
                          ispecs_disk_size=None,
4024
                          ispecs_nic_count=None,
4025
                          minmax_ispecs=None,
4026
                          std_ispecs=None,
4027
                          ipolicy_disk_templates=None,
4028
                          ipolicy_vcpu_ratio=None,
4029
                          ipolicy_spindle_ratio=None,
4030
                          group_ipolicy=False,
4031
                          allowed_values=None,
4032
                          fill_all=False):
4033
  """Creation of instance policy based on command line options.
4034

4035
  @param fill_all: whether for cluster policies we should ensure that
4036
    all values are filled
4037

4038
  """
4039
  assert not (fill_all and allowed_values)
4040

    
4041
  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
4042
                 ispecs_disk_size or ispecs_nic_count)
4043
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
4044
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
4045
                               " together with any --ipolicy-xxx-specs option",
4046
                               errors.ECODE_INVAL)
4047

    
4048
  ipolicy_out = objects.MakeEmptyIPolicy()
4049
  if split_specs:
4050
    assert fill_all
4051
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
4052
                             ispecs_disk_count, ispecs_disk_size,
4053
                             ispecs_nic_count, group_ipolicy, fill_all)
4054
  elif (minmax_ispecs is not None or std_ispecs is not None):
4055
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
4056
                            group_ipolicy, allowed_values)
4057

    
4058
  if ipolicy_disk_templates is not None:
4059
    if allowed_values and ipolicy_disk_templates in allowed_values:
4060
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
4061
    else:
4062
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
4063
  if ipolicy_vcpu_ratio is not None:
4064
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
4065
  if ipolicy_spindle_ratio is not None:
4066
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio
4067

    
4068
  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)
4069

    
4070
  if not group_ipolicy and fill_all:
4071
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)
4072

    
4073
  return ipolicy_out
4074

    
4075

    
4076
def _SerializeGenericInfo(buf, data, level, afterkey=False):
4077
  """Formatting core of L{PrintGenericInfo}.
4078

4079
  @param buf: (string) stream to accumulate the result into
4080
  @param data: data to format
4081
  @type level: int
4082
  @param level: depth in the data hierarchy, used for indenting
4083
  @type afterkey: bool
4084
  @param afterkey: True when we are in the middle of a line after a key (used
4085
      to properly add newlines or indentation)
4086

4087
  """
4088
  baseind = "  "
4089
  if isinstance(data, dict):
4090
    if not data:
4091
      buf.write("\n")
4092
    else:
4093
      if afterkey:
4094
        buf.write("\n")
4095
        doindent = True
4096
      else:
4097
        doindent = False
4098
      for key in sorted(data):
4099
        if doindent:
4100
          buf.write(baseind * level)
4101
        else:
4102
          doindent = True
4103
        buf.write(key)
4104
        buf.write(": ")
4105
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
4106
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
4107
    # list of tuples (an ordered dictionary)
4108
    if afterkey:
4109
      buf.write("\n")
4110
      doindent = True
4111
    else:
4112
      doindent = False
4113
    for (key, val) in data:
4114
      if doindent:
4115
        buf.write(baseind * level)
4116
      else:
4117
        doindent = True
4118
      buf.write(key)
4119
      buf.write(": ")
4120
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
4121
  elif isinstance(data, list):
4122
    if not data:
4123
      buf.write("\n")
4124
    else:
4125
      if afterkey:
4126
        buf.write("\n")
4127
        doindent = True
4128
      else:
4129
        doindent = False
4130
      for item in data:
4131
        if doindent:
4132
          buf.write(baseind * level)
4133
        else:
4134
          doindent = True
4135
        buf.write("-")
4136
        buf.write(baseind[1:])
4137
        _SerializeGenericInfo(buf, item, level + 1)
4138
  else:
4139
    # This branch should only be taken for strings, but it's practically
4140
    # impossible to guarantee that no other types are produced somewhere
4141
    buf.write(str(data))
4142
    buf.write("\n")
4143

    
4144

    
4145
def PrintGenericInfo(data):
4146
  """Print information formatted according to the hierarchy.
4147

4148
  The output is a valid YAML string.
4149

4150
  @param data: the data to print. It's a hierarchical structure whose elements
4151
      can be:
4152
        - dictionaries, where keys are strings and values are of any of the
4153
          types listed here
4154
        - lists of pairs (key, value), where key is a string and value is of
4155
          any of the types listed here; it's a way to encode ordered
4156
          dictionaries
4157
        - lists of any of the types listed here
4158
        - strings
4159

4160
  """
4161
  buf = StringIO()
4162
  _SerializeGenericInfo(buf, data, 0)
4163
  ToStdout(buf.getvalue().rstrip("\n"))
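
# Illustrative sketch only: the kind of structure PrintGenericInfo accepts;
# the keys and values below are made up.
def _ExamplePrintGenericInfo():
  """Sketch of PrintGenericInfo input.

  """
  # A list of (key, value) pairs keeps the output order stable; values may
  # be strings, lists or nested dictionaries.
  PrintGenericInfo([
    ("Cluster name", "cluster.example.com"),
    ("Enabled hypervisors", ["kvm", "xen-pvm"]),
    ("Default parameters", {
      "memory": "128",
      "vcpus": "1",
      }),
    ])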