#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module dealing with command line parsing"""


import sys
import textwrap
import os.path
import time
import logging
import errno
import itertools
import shlex
from cStringIO import StringIO

from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import opcodes
from ganeti import luxi
from ganeti import ssconf
from ganeti import rpc
from ganeti import ssh
from ganeti import compat
from ganeti import netutils
from ganeti import qlang
from ganeti import objects
from ganeti import pathutils

from optparse import (OptionParser, TitledHelpFormatter,
                      Option, OptionValueError)


__all__ = [
  # Command line options
  "ABSOLUTE_OPT",
  "ADD_UIDS_OPT",
  "ADD_RESERVED_IPS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "ALLOW_FAILOVER_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_PARAMS_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "DST_NODE_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ENABLED_DISK_TEMPLATES_OPT",
  "ERROR_CODES_OPT",
  "FAILURE_ONLY_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_FILTER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GATEWAY_OPT",
  "GATEWAY6_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "GLOBAL_SHARED_FILEDIR_OPT",
  "HOTPLUG_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_ERRORS_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INCLUDEDEFAULTS_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MASTER_NETMASK_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "MODIFY_ETCHOSTS_OPT",
  "NET_OPT",
  "NETWORK_OPT",
  "NETWORK6_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_PRIMARY_OPT",
  "NEW_SECONDARY_OPT",
  "NEW_SPICE_CERT_OPT",
  "NIC_PARAMS_OPT",
  "NOCONFLICTSCHECK_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NORUNTIME_CHGS_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "OFFLINE_INST_OPT",
  "ONLINE_INST_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "OOB_TIMEOUT_OPT",
  "POWER_DELAY_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIMARY_ONLY_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REASON_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_RESERVED_IPS_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "RUNTIME_MEM_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SECONDARY_ONLY_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHOW_MACHINE_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SPECS_CPU_COUNT_OPT",
  "SPECS_DISK_COUNT_OPT",
  "SPECS_DISK_SIZE_OPT",
  "SPECS_MEM_SIZE_OPT",
  "SPECS_NIC_COUNT_OPT",
  "SPLIT_ISPECS_OPTS",
  "IPOLICY_STD_SPECS_OPT",
  "IPOLICY_DISK_TEMPLATES",
  "IPOLICY_VCPU_RATIO",
  "SPICE_CACERT_OPT",
  "SPICE_CERT_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STARTUP_PAUSED_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_ADD_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "TO_GROUP_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_EXTERNAL_MIP_SCRIPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "WFSYNC_OPT",
  "YES_DOIT_OPT",
  "DISK_STATE_OPT",
  "HV_STATE_OPT",
  "IGNORE_IPOLICY_OPT",
  "INSTANCE_POLICY_OPTS",
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_NETWORK",
  "OPT_COMPL_ONE_OS",
  "OPT_COMPL_ONE_EXTSTORAGE",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]

NO_PREFIX = "no_"
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }


class _Argument:
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return ("<%s min=%s max=%s>" %
            (self.__class__.__name__, self.min, self.max))


class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return ("<%s min=%s max=%s choices=%r>" %
            (self.__class__.__name__, self.min, self.max, self.choices))


class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgNetwork(_Argument):
  """Network argument.

  """


class ArgGroup(_Argument):
  """Node group argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


class ArgExtStorage(_Argument):
  """ExtStorage argument.

  """


ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
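
# Illustrative note: the Arg* descriptors above only encode arity (min/max).
# For example, a command that takes exactly one instance name followed by any
# number of node names could declare (hypothetical variable name):
#   cmd_args = [ArgInstance(min=1, max=1), ArgNode()]
# The ARGS_* constants are predefined shorthands for the common
# single-descriptor cases.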


def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    retval = kind, None
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval


def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    new_fh.close()
  args.extend(new_data)


def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient(query=True)
  result = cl.QueryTags(kind, name)
  result = list(result)
  result.sort()
  for tag in result:
    ToStdout(tag)


def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  SubmitOrSend(op, opts)


def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParsers custom converter for units.

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))


def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if data:
    for elem in utils.UnescapeAndSplit(data, sep=","):
      if "=" in elem:
        key, val = elem.split("=", 1)
      elif parse_prefixes:
        if elem.startswith(NO_PREFIX):
          key, val = elem[len(NO_PREFIX):], False
        elif elem.startswith(UN_PREFIX):
          key, val = elem[len(UN_PREFIX):], None
        else:
          key, val = elem, True
      else:
        raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                    (elem, opt))
      if key in kv_dict:
        raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                    (key, opt))
      kv_dict[key] = val
  return kv_dict
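
# Example with illustrative values; with parse_prefixes=True the special
# prefixes are honoured:
#   _SplitKeyVal("--backend", "mem=512,no_auto_balance,-serial", True)
#     => {"mem": "512", "auto_balance": False, "serial": None}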


def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  if ":" not in value:
    ident, rest = value, ""
  else:
    ident, rest = value.split(":", 1)

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(NO_PREFIX):], False)
  elif (parse_prefixes and ident.startswith(UN_PREFIX) and
        (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    retval = (ident[len(UN_PREFIX):], None)
  else:
    kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
    retval = (ident, kv_dict)
  return retval
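
# Examples with illustrative values:
#   _SplitIdentKeyVal("--net", "0:ip=192.0.2.10,link=br0", True)
#     => ("0", {"ip": "192.0.2.10", "link": "br0"})
#   _SplitIdentKeyVal("--disk", "no_1", True)
#     => ("1", False)    # "no_" prefix requests removal of the parameter group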


def check_ident_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  return _SplitIdentKeyVal(opt, value, True)


def check_key_val(option, opt, value):  # pylint: disable=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)


def _SplitListKeyVal(opt, value):
  retval = {}
  for elem in value.split("/"):
    if not elem:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
    if ident in retval:
      msg = ("Duplicated parameter '%s' in parsing %s: %s" %
             (ident, opt, elem))
      raise errors.ParameterError(msg)
    retval[ident] = valdict
  return retval


def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  retval = []
  for line in value.split("//"):
    retval.append(_SplitListKeyVal(opt, line))
  return retval
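
# Example with illustrative values (this is the parser behind options such as
# --ipolicy-bounds-specs, which use type="multilistidentkeyval"):
#   check_multilist_ident_key_val(None, "--ipolicy-bounds-specs",
#                                 "min:disk-size=1024/max:disk-size=4096")
#     => [{"min": {"disk-size": "1024"}, "max": {"disk-size": "4096"}}]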


def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


def check_list(option, opt, value): # pylint: disable=W0613
  """Custom parser for comma-separated lists.

  """
  # we have to make this explicit check since "".split(",") is [""],
  # not an empty list :(
  if not value:
    return []
  else:
    return utils.UnescapeAndSplit(value)


def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom parser for float numbers which might be also defaults.

  """
  value = value.lower()

  if value == constants.VALUE_DEFAULT:
    return value
  else:
    return float(value)
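
# Examples for the simple checkers above (illustrative values; check_list is
# assumed to use the default comma separator of utils.UnescapeAndSplit):
#   check_bool(None, "--offline", "Yes")          => True
#   check_list(None, "--disks", "sda,sdb")        => ["sda", "sdb"]
#   check_maybefloat(None, "--ratio", "2.5")      => 2.5
#   check_maybefloat(None, "--ratio", "default")  => "default" (kept as string)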


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])


class CliOption(Option):
  """Custom option class for optparse.

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat


# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption
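
# Sketch of how the custom types are exercised by optparse (hypothetical
# option and parameter names, for illustration only):
#   parser = OptionParser(option_class=CliOption)
#   parser.add_option(cli_option("-B", "--backend-parameters", dest="beparams",
#                                type="keyval", default={}))
#   options, args = parser.parse_args(["-B", "vcpus=2,no_auto_balance"])
#   # options.beparams == {"vcpus": "2", "auto_balance": False}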


_YORNO = "yes|no"

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                                            " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

SYNC_OPT = cli_option("--sync", dest="do_locking",
                      default=False, action="store_true",
                      help=("Grab locks while doing the queries"
                            " in order to ensure more consistent results"))

DRY_RUN_OPT = cli_option("--dry-run", default=False,
                         action="store_true",
                         help=("Do not execute the operation, just run the"
                               " check steps and verify if it could be"
                               " executed"))

VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
                         action="store_true",
                         help="Increase the verbosity of the operation")

DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
                              action="store_true", dest="simulate_errors",
                              help="Debugging option that makes the operation"
                              " treat most runtime checks as failed")

NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
                        default=True, action="store_false",
                        help="Don't wait for sync (DANGEROUS!)")

WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
                        default=False, action="store_true",
                        help="Wait for disks to sync")

ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
                             action="store_true", default=False,
                             help="Enable offline instance")

OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
                              action="store_true", default=False,
                              help="Disable down instance")

DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
                               help=("Custom disk setup (%s)" %
                                     utils.CommaJoin(constants.DISK_TEMPLATES)),
                               default=None, metavar="TEMPL",
                               choices=list(constants.DISK_TEMPLATES))

NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
                        help="Do not create any network cards for"
                        " the instance")

FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                               help="Relative path under default cluster-wide"
                               " file storage dir to store file-based disks",
                               default=None, metavar="<DIR>")

FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
                                  help="Driver to use for image files",
                                  default=None, metavar="<DRIVER>",
                                  choices=list(constants.FILE_DRIVER))

IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
                            help="Select nodes for the instance automatically"
                            " using the <NAME> iallocator plugin",
                            default=None, type="string",
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
                                    metavar="<NAME>",
                                    help="Set the default instance"
                                    " allocator plugin",
                                    default=None, type="string",
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)

OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
                    metavar="<os>",
                    completion_suggest=OPT_COMPL_ONE_OS)

OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
                          type="keyval", default={},
                          help="OS parameters")

FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
                               action="store_true", default=False,
                               help="Force an unknown variant")

NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
                            action="store_true", default=False,
                            help="Do not install the OS (will"
                            " enable no-start)")

NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
                                dest="allow_runtime_chgs",
                                default=True, action="store_false",
                                help="Don't allow runtime changes")

BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
                         type="keyval", default={},
                         help="Backend parameters")

HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
                        default={}, dest="hvparams",
                        help="Hypervisor parameters")

DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
                             help="Disk template parameters, in the format"
                             " template:option=value,option=value,...",
                             type="identkeyval", action="append", default=[])

SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
                                type="keyval", default={},
                                help="Memory size specs: list of key=value,"
                                " where key is one of min, max, std"
                                " (in MB or using a unit)")

SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
                                 type="keyval", default={},
                                 help="CPU count specs: list of key=value,"
                                 " where key is one of min, max, std")

SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
                                  dest="ispecs_disk_count",
                                  type="keyval", default={},
                                  help="Disk count specs: list of key=value,"
                                  " where key is one of min, max, std")

SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
                                 type="keyval", default={},
                                 help="Disk size specs: list of key=value,"
                                 " where key is one of min, max, std"
                                 " (in MB or using a unit)")

SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
                                 type="keyval", default={},
                                 help="NIC count specs: list of key=value,"
                                 " where key is one of min, max, std")

IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
                                      dest="ipolicy_bounds_specs",
                                      type="multilistidentkeyval", default=None,
                                      help="Complete instance specs limits")

IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
                                   dest="ipolicy_std_specs",
                                   type="keyval", default=None,
                                   help="Complete standard instance specs")

IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
                                    dest="ipolicy_disk_templates",
                                    type="list", default=None,
                                    help="Comma-separated list of"
                                    " enabled disk templates")

IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
                                dest="ipolicy_vcpu_ratio",
                                type="maybefloat", default=None,
                                help="The maximum allowed vcpu-to-cpu ratio")

IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
                                   dest="ipolicy_spindle_ratio",
                                   type="maybefloat", default=None,
                                   help=("The maximum allowed instances to"
                                         " spindle ratio"))

HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
                            help="Hypervisor and hypervisor options, in the"
                            " format hypervisor:option=value,option=value,...",
                            default=None, type="identkeyval")

HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
                        help="Hypervisor and hypervisor options, in the"
                        " format hypervisor:option=value,option=value,...",
                        default=[], action="append", type="identkeyval")

NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                           action="store_false",
                           help="Don't check that the instance's IP"
                           " is alive")

NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
                             default=True, action="store_false",
                             help="Don't check that the instance's name"
                             " is resolvable")

NET_OPT = cli_option("--net",
                     help="NIC parameters", default=[],
                     dest="nics", action="append", type="identkeyval")

DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
                      dest="disks", action="append", type="identkeyval")

DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
                         help="Comma-separated list of disks"
                         " indices to act on (e.g. 0,2) (optional,"
                         " defaults to all disks)")

OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
                         help="Enforces a single-disk configuration using the"
                         " given disk size, in MiB unless a suffix is used",
                         default=None, type="unit", metavar="<size>")

IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
                                dest="ignore_consistency",
                                action="store_true", default=False,
                                help="Ignore the consistency of the disks on"
                                " the secondary")

ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
                                dest="allow_failover",
                                action="store_true", default=False,
                                help="If migration is not possible fallback to"
                                     " failover")

NONLIVE_OPT = cli_option("--non-live", dest="live",
                         default=True, action="store_false",
                         help="Do a non-live migration (this usually means"
                         " freeze the instance, save the state, transfer and"
                         " only then resume running on the secondary node)")

MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
                                default=None,
                                choices=list(constants.HT_MIGRATION_MODES),
                                help="Override default migration mode (choose"
                                " either live or non-live)")

NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
                                help="Target node and optional secondary node",
                                metavar="<pnode>[:<snode>]",
                                completion_suggest=OPT_COMPL_INST_ADD_NODES)

NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
                           action="append", metavar="<node>",
                           help="Use only this node (can be used multiple"
                           " times, if not given defaults to all nodes)",
                           completion_suggest=OPT_COMPL_ONE_NODE)

NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
                           dest="nodegroup",
                           help="Node group (name or uuid)",
                           metavar="<nodegroup>",
                           default=None, type="string",
                           completion_suggest=OPT_COMPL_ONE_NODEGROUP)

SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
                             metavar="<node>",
                             completion_suggest=OPT_COMPL_ONE_NODE)

NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
                         action="store_false",
                         help="Don't start the instance after creation")

SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
                         action="store_true", default=False,
                         help="Show command instead of executing it")

CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
                         default=False, action="store_true",
                         help="Instead of performing the migration/failover,"
                         " try to recover from a failed cleanup. This is safe"
                         " to run even if the instance is healthy, but it"
                         " will create extra replication traffic and"
                         " briefly disrupt the replication (like during the"
                         " migration/failover)")

STATIC_OPT = cli_option("-s", "--static", dest="static",
                        action="store_true", default=False,
                        help="Only show configuration data, not runtime data")

ALL_OPT = cli_option("--all", dest="show_all",
                     default=False, action="store_true",
                     help="Show info on all instances on the cluster."
                     " This can take a long time to run, use wisely")

SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
                           action="store_true", default=False,
                           help="Interactive OS reinstall, lists available"
                           " OS templates for selection")

IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster"
                                 " configuration even if there are failures"
                                 " during the removal process")

IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
                                        dest="ignore_remove_failures",
                                        action="store_true", default=False,
                                        help="Remove the instance from the"
                                        " cluster configuration even if there"
                                        " are failures during the removal"
                                        " process")

REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
                                 action="store_true", default=False,
                                 help="Remove the instance from the cluster")

DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
                          help="Specifies the new node for the instance",
                          metavar="NODE", default=None,
                          completion_suggest=OPT_COMPL_ONE_NODE)

NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
                               help="Specifies the new secondary node",
                               metavar="NODE", default=None,
                               completion_suggest=OPT_COMPL_ONE_NODE)

NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
                             help="Specifies the new primary node",
                             metavar="<node>", default=None,
                             completion_suggest=OPT_COMPL_ONE_NODE)

ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
                            default=False, action="store_true",
                            help="Replace the disk(s) on the primary"
                                 " node (applies only to internally mirrored"
                                 " disk templates, e.g. %s)" %
                                 utils.CommaJoin(constants.DTS_INT_MIRROR))

ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
                              default=False, action="store_true",
                              help="Replace the disk(s) on the secondary"
                                   " node (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
                              default=False, action="store_true",
                              help="Lock all nodes and auto-promote as needed"
                              " to MC status")

AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
                              default=False, action="store_true",
                              help="Automatically replace faulty disks"
                                   " (applies only to internally mirrored"
                                   " disk templates, e.g. %s)" %
                                   utils.CommaJoin(constants.DTS_INT_MIRROR))

IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
                             default=False, action="store_true",
                             help="Ignore current recorded size"
                             " (useful for forcing activation when"
                             " the recorded size is wrong)")

SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
                          metavar="<node>",
                          completion_suggest=OPT_COMPL_ONE_NODE)

SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
                         metavar="<dir>")

SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
                              help="Specify the secondary ip for the node",
                              metavar="ADDRESS", default=None)

READD_OPT = cli_option("--readd", dest="readd",
                       default=False, action="store_true",
                       help="Readd old node after replacing it")

NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
                                default=True, action="store_false",
                                help="Disable SSH key fingerprint checking")

NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
                                 default=False, action="store_true",
                                 help="Force the joining of a node")

MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
                    type="bool", default=None, metavar=_YORNO,
                    help="Set the master_candidate flag on the node")

OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the offline flag on the node"
                               " (cluster does not communicate with offline"
                               " nodes)"))

DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
                         type="bool", default=None,
                         help=("Set the drained flag on the node"
                               " (excluded from allocation operations)"))

CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
                              type="bool", default=None, metavar=_YORNO,
                              help="Set the master_capable flag on the node")

CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
                          type="bool", default=None, metavar=_YORNO,
                          help="Set the vm_capable flag on the node")

ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
                             type="bool", default=None, metavar=_YORNO,
                             help="Set the allocatable flag on a volume")

NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
                               help="Disable support for lvm based instances"
                               " (cluster-wide)",
                               action="store_false", default=True)

ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
                            dest="enabled_hypervisors",
                            help="Comma-separated list of hypervisors",
                            type="string", default=None)

ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
                                        dest="enabled_disk_templates",
                                        help="Comma-separated list of "
                                             "disk templates",
                                        type="string", default=None)

NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
                            type="keyval", default={},
                            help="NIC parameters")

CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
                         dest="candidate_pool_size", type="int",
                         help="Set the candidate pool size")

VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
                         help=("Enables LVM and specifies the volume group"
                               " name (cluster-wide) for disk allocation"
                               " [%s]" % constants.DEFAULT_VG),
                         metavar="VG", default=None)

YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
                          help="Destroy cluster", action="store_true")

NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
                          help="Skip node agreement check (dangerous)",
                          action="store_true", default=False)

MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added"
                               " (cluster init default: %s)" %
                               constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=None)

MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
                                help="Specify the netmask of the master IP",
                                metavar="NETMASK",
                                default=None)

USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
                                     dest="use_external_mip_script",
                                     help="Specify whether to run a"
                                     " user-provided script for the master"
                                     " IP address turnup and"
                                     " turndown operations",
                                     type="bool", metavar=_YORNO, default=None)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                pathutils.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=pathutils.DEFAULT_FILE_STORAGE_DIR)

GLOBAL_SHARED_FILEDIR_OPT = cli_option(
  "--shared-file-storage-dir",
  dest="shared_file_storage_dir",
  help="Specify the default directory (cluster-wide) for storing the"
  " shared file-based disks [%s]" %
  pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
  metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify %s" % pathutils.ETC_HOSTS,
                                   action="store_false", default=True)

MODIFY_ETCHOSTS_OPT = \
 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
            default=None, type="bool",
            help="Defines whether the cluster should autonomously modify"
            " and keep in sync the /etc/hosts file of the nodes")

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
                            default=None,
                            help="File containing new SPICE certificate")

SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
                              default=None,
                              help="File containing the certificate of the CA"
                              " which signed the SPICE certificate")

NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
                                dest="new_spice_cert", default=None,
                                action="store_true",
                                help=("Generate a new self-signed SPICE"
                                      " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
1403
                                       help=("Load new new cluster domain"
1404
                                             " secret from file"))
1405

    
1406
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1407
                                           dest="new_cluster_domain_secret",
1408
                                           default=False, action="store_true",
1409
                                           help=("Create a new cluster domain"
1410
                                                 " secret"))
1411

    
1412
USE_REPL_NET_OPT = cli_option("--use-replication-network",
1413
                              dest="use_replication_network",
1414
                              help="Whether to use the replication network"
1415
                              " for talking to the nodes",
1416
                              action="store_true", default=False)
1417

    
1418
MAINTAIN_NODE_HEALTH_OPT = \
1419
    cli_option("--maintain-node-health", dest="maintain_node_health",
1420
               metavar=_YORNO, default=None, type="bool",
1421
               help="Configure the cluster to automatically maintain node"
1422
               " health, by shutting down unknown instances, shutting down"
1423
               " unknown DRBD devices, etc.")
1424

    
1425
IDENTIFY_DEFAULTS_OPT = \
1426
    cli_option("--identify-defaults", dest="identify_defaults",
1427
               default=False, action="store_true",
1428
               help="Identify which saved instance parameters are equal to"
1429
               " the current cluster defaults and set them as such, instead"
1430
               " of marking them as overridden")
1431

    
1432
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1433
                         action="store", dest="uid_pool",
1434
                         help=("A list of user-ids or user-id"
1435
                               " ranges separated by commas"))
1436

    
1437
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1438
                          action="store", dest="add_uids",
1439
                          help=("A list of user-ids or user-id"
1440
                                " ranges separated by commas, to be"
1441
                                " added to the user-id pool"))
1442

    
1443
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volume names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")


def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  """
  value = _PRIONAME_TO_VALUE[value]

  setattr(parser.values, option.dest, value)


PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                     " (applies only to internally mirrored"
                                     " disk templates, e.g. %s)" %
                                     utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]


class _ShowUsage(Exception):
  """Exception class for L{_ParseArgs}.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    Exception.__init__(self)
    self.exit_error = exit_error


class _ShowVersion(Exception):
  """Exception class for L{_ParseArgs}.

  """

def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args


def _FormatUsage(binary, commands):
  """Generates a nice description of all commands.

  @param binary: Script name
  @param commands: Dictionary containing command definitions

  """
  # compute the max line length for cmd + usage
  mlen = min(60, max(map(len, commands)))

  yield "Usage: %s {command} [options...] [argument...]" % binary
  yield "%s <command> --help to see details, or man %s" % (binary, binary)
  yield ""
  yield "Commands:"

  # and format a nice command list
  for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
    help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
    yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
    for line in help_lines:
      yield " %-*s   %s" % (mlen, "", line)

  yield ""

def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True

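# Illustrative example only (editor's sketch, not part of the original code):
# with an argument definition requiring exactly one value, such as the
# ArgInstance specifications used elsewhere in this module,
#   _CheckArguments("info", args_def, [])         would print an error and
#                                                 return False
#   _CheckArguments("info", args_def, ["inst1"])  would return True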
def SplitNodeOption(value):
  """Splits the value of a --node option.

  """
  if value and ":" in value:
    return value.split(":", 1)
  else:
    return (value, None)

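# Illustrative use of SplitNodeOption (the node names are hypothetical):
#   SplitNodeOption("node1.example.com:node2.example.com")
#     -> ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com") -> ("node1.example.com", None)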
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if os_variants:
    return ["%s+%s" % (os_name, v) for v in os_variants]
  else:
    return [os_name]

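# Illustrative use of CalculateOSNames (OS and variant names are made up):
#   CalculateOSNames("debootstrap", ["default", "minimal"])
#     -> ["debootstrap+default", "debootstrap+minimal"]
#   CalculateOSNames("debootstrap", None) -> ["debootstrap"]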
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    return default + selected[1:].split(",")

  return selected.split(",")

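# Illustrative use of ParseFields (field names are only examples):
#   ParseFields(None, ["name", "status"])       -> ["name", "status"]
#   ParseFields("+oper_ram", ["name"])          -> ["name", "oper_ram"]
#   ParseFields("name,os", ["name", "status"])  -> ["name", "os"]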
UsesRPC = rpc.RunWithRPC


def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer


class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """

def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  job_id = cl.SubmitJob(ops)

  return job_id

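# Minimal usage sketch for SendJob (the opcode shown is only an example):
#   cl = GetClient()
#   job_id = SendJob([opcodes.OpClusterVerify()], cl=cl)
#   ToStdout("Submitted job %s", job_id)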
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)

class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()


class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()

class _LuxiJobPollCb(JobPollCbBase):
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)


class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  def __init__(self, feedback_fn):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore


class StdioJobPollReportCb(JobPollReportCbBase):
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True

def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type != constants.ELOG_MESSAGE:
    log_msg = str(log_msg)

  return utils.SafeEncode(log_msg)

def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
             if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)

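# Minimal usage sketch for PollJob, continuing the SendJob example above;
# without feedback_fn or reporter, progress is printed to stdout/stderr via
# StdioJobPollReportCb:
#   results = PollJob(job_id, cl=cl)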
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]

def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

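# Usage sketch for SubmitOrSend (opcode and arguments are illustrative only):
# a command implementation typically ends with something like
#   op = opcodes.OpInstanceStartup(instance_name=args[0])
#   SubmitOrSend(op, opts)
# which waits for the job unless the user passed --submit.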
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  trail = []

  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  command = sys.argv[1]
  trail.append((source, command, utils.EpochNano()))
  op.reason = trail

def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      op.priority = options.priority
    _InitReasonTrail(op, options)

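# Illustrative effect of SetGenericOpcodeOpts: given options as returned by
# _ParseArgs (e.g. with --debug, --dry-run and --priority set), every opcode
# in the list gets matching debug_level/dry_run/priority fields plus a reason
# trail, e.g.
#   op = opcodes.OpClusterVerify()
#   SetGenericOpcodeOpts([op], options)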
def GetClient(query=False):
  """Connects to a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
  if override_socket:
    if override_socket == constants.LUXI_OVERRIDE_MASTER:
      address = pathutils.MASTER_SOCKET
    elif override_socket == constants.LUXI_OVERRIDE_QUERY:
      address = pathutils.QUERY_SOCKET
    else:
      address = override_socket
  elif query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    address = None
  # TODO: Cache object?
  try:
    client = luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
  return client

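# Usage sketch for GetClient: query-only callers may ask for the query socket,
# while job submission always goes through the master socket, e.g.
#   cl = GetClient(query=True)
#   (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])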
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")

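# Illustrative use of FormatError (the error messages are made up):
#   FormatError(errors.OpExecError("disk sync timed out"))
#     -> (1, "Failure: command execution error:\ndisk sync timed out")
#   FormatError(errors.ConfigurationError("bad config")) returns exit code 2.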
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result

def ParseNicOption(optvalue):
  """Parses the value of the --net option(s).

  """
  try:
    nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
                               errors.ECODE_INVAL)

  nics = [{}] * nic_max
  for nidx, ndict in optvalue:
    nidx = int(nidx)

    if not isinstance(ndict, dict):
      raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
                                 " got %s" % (nidx, ndict), errors.ECODE_INVAL)

    utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)

    nics[nidx] = ndict

  return nics

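# Illustrative use of ParseNicOption; the parameter names must be valid keys
# of constants.INIC_PARAMS_TYPES, the values below are only examples:
#   ParseNicOption([("0", {"link": "br0"}), ("1", {"mode": "routed"})])
#     -> [{"link": "br0"}, {"mode": "routed"}]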
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0

class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()

def RunWhileClusterStopped(feedback_fn, fn, *args):
2826
  """Calls a function while all cluster daemons are stopped.
2827

2828
  @type feedback_fn: callable
2829
  @param feedback_fn: Feedback function
2830
  @type fn: callable
2831
  @param fn: Function to be called when daemons are stopped
2832

2833
  """
2834
  feedback_fn("Gathering cluster information")
2835

    
2836
  # This ensures we're running on the master daemon
2837
  cl = GetClient()
2838

    
2839
  (cluster_name, master_node) = \
2840
    cl.QueryConfigValues(["cluster_name", "master_node"])
2841

    
2842
  online_nodes = GetOnlineNodes([], cl=cl)
2843

    
2844
  # Don't keep a reference to the client. The master daemon will go away.
2845
  del cl
2846

    
2847
  assert master_node in online_nodes
2848

    
2849
  return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2850
                                       online_nodes).Call(fn, *args)
2851

    
2852

    
2853
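# NOTE: The following example is an editorial addition, not part of the
# original module.  It sketches how RunWhileClusterStopped is meant to be
# used: the callback receives the helper instance as its first argument and
# can run commands on the stopped nodes via its _RunCmd method.  The command
# and path below are made up for illustration only.
def _ExampleRunWhileClusterStopped():
  """Example only: run a command on every node while the daemons are down."""
  def _ListConfigDir(helper):
    for node_name in helper.online_nodes:
      helper._RunCmd(node_name, ["ls", "-l", "/var/lib/ganeti"])
  return RunWhileClusterStopped(ToStdout, _ListConfigDir)

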
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)   # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result


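# NOTE: Editorial example, not part of the original module.  The field and
# header names below are invented; they only illustrate how GenerateTable
# combines headers, right-aligned numeric fields and unit formatting.
def _ExampleGenerateTable():
  """Example only: render two rows with a right-aligned, unit-aware column."""
  headers = {"name": "Node", "dfree": "DFree"}
  fields = ["name", "dfree"]
  data = [["node1.example.com", 102400], ["node2.example.com", 51200]]
  for line in GenerateTable(headers, fields, None, data,
                            numfields=["dfree"], unitfields=["dfree"]):
    ToStdout(line)

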
def _FormatBool(value):
  """Formats a boolean value as a string.

  """
  if value:
    return "Y"
  return "N"


#: Default formatting for query results; (callback, align right)
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  fmt = override.get(fdef.name, None)
  if fmt is not None:
    return fmt

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # Can't keep this information in the static dictionary
    return (lambda value: utils.FormatUnit(value, unit), True)

  fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if fmt is not None:
    return fmt

  raise NotImplementedError("Can't format column type '%s'" % fdef.kind)


class _QueryColumnFormatter:
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status == constants.RS_NORMAL:
      return self._fn(value)

    assert value is None, \
           "Found value %r for abnormal status %s" % (value, status)

    return FormatResultError(status, self._verbose)


def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
         "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  else:
    if verbose:
      return verbose_text
    return normal_text


def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not

  """
  if unit is None:
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)


def _GetUnknownFields(fdefs):
  """Returns list of unknown fields included in C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  return [fdef for fdef in fdefs
          if fdef.kind == constants.QFT_UNKNOWN]


def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}

  """
  unknown = _GetUnknownFields(fdefs)
  if unknown:
    ToStderr("Warning: Queried for unknown fields %s",
             utils.CommaJoin(fdef.name for fdef in unknown))
    return True

  return False


def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS


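# NOTE: Editorial example, not part of the original module.  It shows the
# typical way a "gnt-* list" command wires up GenericList; it needs a running
# master daemon, and the field names used here are assumptions.
def _ExampleGenericList():
  """Example only: list the name and drained flag of all nodes."""
  return GenericList(constants.QR_NODE, ["name", "drained"], [], None, None,
                     True, namefield="name")

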
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  return [
    fdef.name,
    _QFT_NAMES.get(fdef.kind, fdef.kind),
    fdef.title,
    fdef.doc,
    ]


def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row

  """
  if cl is None:
    cl = GetClient()

  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS


class TableColumn:
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    self.format = fn
    self.align_right = align_right


def _GetColFormatString(width, align_right):
  """Returns the format string for a field.

  """
  if align_right:
    sign = ""
  else:
    sign = "-"

  return "%%%s%ss" % (sign, width)


def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0 for _ in columns]

  # Format row data
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      # Update column widths
      for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)):
        # Modifying a list's items while iterating is fine
        colwidth[idx] = max(oldwidth, len(value))

    data.append(formatted)

  if separator is not None:
    # Return early if a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for col, width in zip(columns, colwidth)])

  return [fmt % tuple(row) for row in data]


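# NOTE: Editorial example, not part of the original module.  It builds two
# TableColumn objects by hand (normally FormatQueryResult does this) and
# renders a small table; the row data and column names are made up.
def _ExampleFormatTable():
  """Example only: format rows with per-column callbacks and alignment."""
  columns = [
    TableColumn("Name", str, False),
    TableColumn("Size", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["disk0", 1024], ["disk1", 20480]]
  for line in FormatTable(rows, columns, True, None):
    ToStdout(line)

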
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"

  (sec, usecs) = ts
  return utils.FormatTime(sec, usecs=usecs)


def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)
  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }
  if value[-1] not in suffix_map:
    try:
      value = int(value)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  else:
    multiplier = suffix_map[value[-1]]
    value = value[:-1]
    if not value: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
    try:
      value = int(value) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                                 errors.ECODE_INVAL)
  return value


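# NOTE: Editorial example, not part of the original module.  It demonstrates
# the suffix handling of ParseTimespec documented above.
def _ExampleParseTimespec():
  """Example only: ParseTimespec results for a few inputs."""
  assert ParseTimespec("30") == 30      # plain number, taken as seconds
  assert ParseTimespec("2m") == 120     # minutes
  assert ParseTimespec("1h") == 3600    # hours
  assert ParseTimespec("1w") == 604800  # weeks

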
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that are skipped.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                                 [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)


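# NOTE: Editorial example, not part of the original module.  It requires a
# running master daemon; the option combination mirrors the docstring above:
# secondary IPs of all online nodes, excluding the master itself.
def _ExampleGetOnlineNodes():
  """Example only: secondary IPs of online non-master nodes."""
  return GetOnlineNodes([], secondary_ips=True, filter_master=True)

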
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise


def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)


def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)


class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]


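# NOTE: Editorial example, not part of the original module.  It queues one
# startup job per instance, submits them in a single luxi call and waits for
# all results; the instance names are made up and a live cluster is required.
def _ExampleJobExecutor():
  """Example only: submit several jobs and wait for all of them."""
  jex = JobExecutor()
  for name in ["instance1.example.com", "instance2.example.com"]:
    jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
  return jex.GetResults()

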
def FormatParamsDictInfo(param_dict, actual):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  ret = {}
  for (key, data) in actual.items():
    if isinstance(data, dict) and data:
      ret[key] = FormatParamsDictInfo(param_dict.get(key, {}), data)
    else:
      ret[key] = str(param_dict.get(key, "default (%s)" % data))
  return ret


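# NOTE: Editorial example, not part of the original module.  The parameter
# names are made up; it only shows how values missing from the own parameters
# are annotated as inherited defaults.
def _ExampleFormatParamsDictInfo():
  """Example only: mark which values are inherited defaults."""
  custom = {"maxmem": "512"}
  actual = {"maxmem": "512", "minmem": "256"}
  result = FormatParamsDictInfo(custom, actual)
  assert result == {"maxmem": "512", "minmem": "default (256)"}
  return result

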
def _FormatListInfoDefault(data, def_data):
  if data is not None:
    ret = utils.CommaJoin(data)
  else:
    ret = "default (%s)" % utils.CommaJoin(def_data)
  return ret


def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key]))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  ret.extend([
    (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret


def _PrintSpecsParameters(buf, specs):
  values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
  buf.write(",".join(values))


def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)
  minmaxes = ipolicy.get("minmax", [])
  first = True
  for minmax in minmaxes:
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if minspecs and maxspecs:
      if first:
        buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
        first = False
      else:
        buf.write("//")
      buf.write("min:")
      _PrintSpecsParameters(buf, minspecs)
      buf.write("/max:")
      _PrintSpecsParameters(buf, maxspecs)


def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join(["  %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice


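# NOTE: Editorial example, not part of the original module.  It prompts the
# user interactively via AskUser; the instance names are made up.
def _ExampleConfirmOperation():
  """Example only: ask for confirmation before shutting down two instances."""
  names = ["instance1.example.com", "instance2.example.com"]
  return ConfirmOperation(names, "instances", "shutdown")

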
def _MaybeParseUnit(elements):
  """Parses and returns a dictionary of potential values with units.

  """
  parsed = {}
  for k, v in elements.items():
    if v == constants.VALUE_DEFAULT:
      parsed[k] = v
    else:
      parsed[k] = utils.ParseUnit(v)
  return parsed


def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
        objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                         ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]


def _ParseSpecUnit(spec, keyname):
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret


def _ParseISpec(spec, keyname, required):
  ret = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(ret, constants.ISPECS_PARAMETER_TYPES)
  missing = constants.ISPECS_PARAMETERS - frozenset(ret.keys())
  if required and missing:
    raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                               (keyname, utils.CommaJoin(missing)),
                               errors.ECODE_INVAL)
  return ret


def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  ret = None
  if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
      len(minmax_ispecs[0]) == 1):
    for (key, spec) in minmax_ispecs[0].items():
      # This loop is executed exactly once
      if key in allowed_values and not spec:
        ret = key
  return ret


def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    minmax_out = []
    for mmpair in minmax_ispecs:
      mmpair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        mmpair_out[key] = _ParseISpec(spec, key, True)
      minmax_out.append(mmpair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = minmax_out
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)


def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out


def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False
    for (key, val) in data:
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, list):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")


def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  buf = StringIO()
  _SerializeGenericInfo(buf, data, 0)
  ToStdout(buf.getvalue().rstrip("\n"))
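

# NOTE: Editorial example, not part of the original module.  It feeds a small
# nested structure (an ordered list of pairs containing a plain list and a
# dict) to PrintGenericInfo; the names and values are made up.
def _ExamplePrintGenericInfo():
  """Example only: nested data is rendered as indented, YAML-like text."""
  PrintGenericInfo([
    ("cluster", "cluster.example.com"),
    ("candidates", ["node1.example.com", "node2.example.com"]),
    ("hvparams", {"kvm": {"acpi": "True"}}),
    ])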